diff --git a/CHANGELOG.md b/CHANGELOG.md index 8c42cbd2dee..6e853f2a851 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ To learn more about active deprecations, we recommend checking [GitHub Discussio ### New +- **General**: Introduce a new experimental Kafka scaler, `apache-kafka`, based on the kafka-go library ([#4692](https://github.com/kedacore/keda/issues/4692)) - **General**: Introduce new Google Cloud Tasks scaler functionality to scale based on the queue length ([#3613](https://github.com/kedacore/keda/issues/3613)) - **AWS SQS Scaler**: Support for scaling to include delayed messages. ([#4377](https://github.com/kedacore/keda/issues/4377)) - **Governance**: KEDA transitioned to CNCF Graduated project ([#63](https://github.com/kedacore/governance/issues/63)) diff --git a/go.mod b/go.mod index 71c6ceffb77..9dd66009f73 100644 --- a/go.mod +++ b/go.mod @@ -70,6 +70,8 @@ require ( github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 github.com/redis/go-redis/v9 v9.1.0 github.com/robfig/cron/v3 v3.0.1 + github.com/segmentio/kafka-go v0.4.42 + github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2 v0.1.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 github.com/tidwall/gjson v1.16.0 diff --git a/go.sum b/go.sum index 48ebbaa9242..dff447a5632 100644 --- a/go.sum +++ b/go.sum @@ -147,20 +147,27 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go-v2 v1.16.12/go.mod h1:C+Ym0ag2LIghJbXhfXZ0YEEp49rBWowxKzJLUoob0ts= github.com/aws/aws-sdk-go-v2 v1.21.0 h1:gMT0IW+03wtYJhRqTVYn0wLzwdnK9sRMcxmtfGzRdJc= github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 h1:OPLEkmhXf6xFPiz0bLeDArZIDx1NNS4oJyG4nv3Gct0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13/go.mod h1:gpAbvyDGQFozTEmlTFO8XcQKHzubdq0LzRyJpG6MiXM= +github.com/aws/aws-sdk-go-v2/config v1.17.2/go.mod h1:jumS/AMwul4WaG8vyXsF6kUndG9zndR+yfYBwl4i9ds= github.com/aws/aws-sdk-go-v2/config v1.18.38 h1:CByQCELMgm2tM1lAehx3XNg0R/pfeXsYzqn0Aq2chJQ= github.com/aws/aws-sdk-go-v2/config v1.18.38/go.mod h1:vNm9Hf5VgG2fSUWhT3zFrqN/RosGcabFMYgiSoxKFU8= +github.com/aws/aws-sdk-go-v2/credentials v1.12.15/go.mod h1:41zTC6U/78fUD7ZCa5NymTJANDjfqySg5YEAYVFl2Ic= github.com/aws/aws-sdk-go-v2/credentials v1.13.36 h1:ps0cPswZjpsOk6sLwG6fdXTzrYjCplgPEyG3OUbbdqE= github.com/aws/aws-sdk-go-v2/credentials v1.13.36/go.mod h1:sY2phUzxbygoyDtTXhqi7GjGjCQ1S5a5Rj8u3ksBxCg= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.13/go.mod h1:y0eXmsNBFIVjUE8ZBjES8myOHlMsXDz7qGT93+MVdjk= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 h1:uDZJF1hu0EVT/4bogChk8DyjSF6fof6uL/0Y26Ma7Fg= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11/go.mod h1:TEPP4tENqBGO99KwVpV9MlOX4NSrSLP8u3KRy2CDwA8= +github.com/aws/aws-sdk-go-v2/internal/configsources
v1.1.19/go.mod h1:llxE6bwUZhuCas0K7qGiu5OgMis3N7kdWtFSxoHmJ7E= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 h1:22dGT7PneFMx4+b3pz7lMTRyN8ZKH7M2cW4GP9yUS2g= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.13/go.mod h1:lB12mkZqCSo5PsdBFLNqc2M/OOYgNAy8UtaktyuWvE8= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 h1:SijA0mgjV8E+8G45ltVHs0fvKpTj8xmZJ3VwhGKtUSI= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.20/go.mod h1:bfTcsThj5a9P5pIGRy0QudJ8k4+issxXX+O6Djnd5Cs= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42 h1:GPUcE/Yq7Ur8YSUk6lVkoIMWnJNO0HT18GUzCWCgCI0= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42/go.mod h1:rzfdUlfA+jdgLDmPKjd3Chq9V7LVLYo1Nz++Wb91aRo= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.27.6 h1:YUQGnci0QY+X+tu7XI7zy2vnUjmuUw0VT4OC1SikKIw= @@ -173,18 +180,23 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14 h1:m0QTSI6 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14/go.mod h1:dDilntgHy9WnHXsh7dDtUPgHKEfTJIBUTHM8OWm0f/0= github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.7.35 h1:UKjpIDLVF90RfV88XurdduMoTxPqtGHZMIDYZQM7RO4= github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.7.35/go.mod h1:B3dUg0V6eJesUTi+m27NUkj7n8hdDKYUpxj8f4+TqaQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.13/go.mod h1:V390DK4MQxLpDdXxFqizyz8KUxuWImkW/xzgXMz0yyk= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 h1:CdzPW9kKitgIiLV1+MHobfR5Xg25iYnyzWZhyQuSlDI= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35/go.mod h1:QGF2Rs33W5MaN9gYdEQOBBFPLwTZkEhRwI33f7KIG0o= github.com/aws/aws-sdk-go-v2/service/kinesis v1.18.5 h1:naSZmQiFjoTLxNjfDy/KgEnWdG3odkR6gIEgTx21YOM= github.com/aws/aws-sdk-go-v2/service/kinesis v1.18.5/go.mod h1:0h3hOcyFXyjvI3wGt8C8vk2+II9XxHwFM7zH2KvLHmA= github.com/aws/aws-sdk-go-v2/service/sqs v1.24.5 h1:RyDpTOMEJO6ycxw1vU/6s0KLFaH3M0z/z9gXHSndPTk= github.com/aws/aws-sdk-go-v2/service/sqs v1.24.5/go.mod h1:RZBu4jmYz3Nikzpu/VuVvRnTEJ5a+kf36WT2fcl5Q+Q= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.18/go.mod h1:ytmEi5+qwcSNcV2pVA8PIb1DnKT/0Bu/K4nfJHwoM6c= github.com/aws/aws-sdk-go-v2/service/sso v1.13.6 h1:2PylFCfKCEDv6PeSN09pC/VUiRd10wi1VfHG5FrW0/g= github.com/aws/aws-sdk-go-v2/service/sso v1.13.6/go.mod h1:fIAwKQKBFu90pBxx07BFOMJLpRUGu8VOzLJakeY+0K4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.1/go.mod h1:NY+G+8PW0ISyJ7/6t5mgOe6qpJiwZa9Jix05WPscJjg= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.5 h1:dnInJb4S0oy8aQuri1mV6ipLlnZPfnsDNB9BGO9PDNY= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.5/go.mod h1:yygr8ACQRY2PrEcy3xsUI357stq2AxnFM6DIsR9lij4= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.14/go.mod h1:Y+BUV19q3OmQVqNUlbZ40zVi3NM6Biuxwkx/qdSD/CY= github.com/aws/aws-sdk-go-v2/service/sts v1.21.5 h1:CQBFElb0LS8RojMJlxRSo/HXipvTZW2S44Lt9Mk2aYQ= github.com/aws/aws-sdk-go-v2/service/sts v1.21.5/go.mod h1:VC7JDqsqiwXukYEDjoHh9U0fOJtNWh04FPQz4ct4GGU= +github.com/aws/smithy-go v1.13.0/go.mod 
h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/aws/smithy-go v1.14.2 h1:MJU9hqBGbvWZdApzpvoF2WAIJDbtjK2NDJSiJP7HblQ= github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -600,6 +612,7 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= @@ -719,6 +732,7 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= @@ -774,6 +788,11 @@ github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIH github.com/samber/lo v1.37.0 h1:XjVcB8g6tgUp8rsPsJ2CvhClfImrpL04YpQHXeHPhRw= github.com/samber/lo v1.37.0/go.mod h1:9vaz2O4o8oOnK23pd2TrXufcbdbJIa3b6cstBWKpopA= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/kafka-go v0.4.34/go.mod h1:GAjxBQJdQMB5zfNA21AhpaqOB2Mu+w3De4ni3Gbm8y0= +github.com/segmentio/kafka-go v0.4.42 h1:qffhBZCz4WcWyNuHEclHjIMLs2slp6mZO8px+5W5tfU= +github.com/segmentio/kafka-go v0.4.42/go.mod h1:d0g15xPMqoUookug0OU75DhGZxXwCFxSLeJ4uphwJzg= +github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2 v0.1.0 h1:Fjet4CFbGyWMbvwWb42PKZwKdpDksSB7eaPi9Ap6EKY= +github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2 v0.1.0/go.mod h1:zk5DCsbNtQ0BhooxFaVpLBns0tArkR/xE+4oq2MvCq0= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -1012,6 +1031,7 @@ golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220706163947-c90051bbdb60/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220906165146-f3363e06e74c/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= diff --git a/pkg/scalers/apache_kafka_scaler.go b/pkg/scalers/apache_kafka_scaler.go new file mode 100644 index 00000000000..af76cf24dcd --- /dev/null +++ b/pkg/scalers/apache_kafka_scaler.go @@ -0,0 +1,697 @@ +/* +Copyright 2023 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Please note that this is an experimental scaler based on the kafka-go library. + +package scalers + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/go-logr/logr" + "github.com/segmentio/kafka-go" + "github.com/segmentio/kafka-go/sasl" + "github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2" + "github.com/segmentio/kafka-go/sasl/plain" + "github.com/segmentio/kafka-go/sasl/scram" + v2 "k8s.io/api/autoscaling/v2" + "k8s.io/metrics/pkg/apis/external_metrics" + + kedautil "github.com/kedacore/keda/v2/pkg/util" +) + +type apacheKafkaScaler struct { + metricType v2.MetricTargetType + metadata apacheKafkaMetadata + client *kafka.Client + logger logr.Logger + previousOffsets map[string]map[int]int64 +} + +type apacheKafkaMetadata struct { + bootstrapServers []string + group string + topic []string + partitionLimitation []int32 + lagThreshold int64 + activationLagThreshold int64 + offsetResetPolicy offsetResetPolicy + allowIdleConsumers bool + excludePersistentLag bool + + // If an invalid offset is found, whether to scale to 1 (false - the default) so consumption can + // occur or scale to 0 (true). 
See discussion in https://github.com/kedacore/keda/issues/2612 + scaleToZeroOnInvalidOffset bool + + // SASL + saslType kafkaSaslType + username string + password string + + // MSK + awsRegion string + awsEndpoint string + awsAuthorization awsAuthorizationMetadata + + // TLS + enableTLS bool + cert string + key string + keyPassword string + ca string + + scalerIndex int +} + +const ( + KafkaSASLTypeMskIam = "aws_msk_iam" +) + +// NewApacheKafkaScaler creates a new apacheKafkaScaler +func NewApacheKafkaScaler(ctx context.Context, config *ScalerConfig) (Scaler, error) { + metricType, err := GetMetricTargetType(config) + if err != nil { + return nil, fmt.Errorf("error getting scaler metric type: %w", err) + } + + logger := InitializeLogger(config, "apache_kafka_scaler") + + kafkaMetadata, err := parseApacheKafkaMetadata(config, logger) + if err != nil { + return nil, fmt.Errorf("error parsing kafka metadata: %w", err) + } + + client, err := getApacheKafkaClient(ctx, kafkaMetadata, logger) + if err != nil { + return nil, err + } + + previousOffsets := make(map[string]map[int]int64) + + return &apacheKafkaScaler{ + client: client, + metricType: metricType, + metadata: kafkaMetadata, + logger: logger, + previousOffsets: previousOffsets, + }, nil +} + +func parseApacheKafkaAuthParams(config *ScalerConfig, meta *apacheKafkaMetadata) error { + meta.enableTLS = false + enableTLS := false + if val, ok := config.TriggerMetadata["tls"]; ok { + switch val { + case stringEnable: + enableTLS = true + case stringDisable: + enableTLS = false + default: + return fmt.Errorf("error incorrect TLS value given, got %s", val) + } + } + + if val, ok := config.AuthParams["tls"]; ok { + val = strings.TrimSpace(val) + if enableTLS { + return errors.New("unable to set `tls` in both ScaledObject and TriggerAuthentication together") + } + switch val { + case stringEnable: + enableTLS = true + case stringDisable: + enableTLS = false + default: + return fmt.Errorf("error incorrect TLS value given, got %s", val) + } + } + + if enableTLS { + certGiven := config.AuthParams["cert"] != "" + keyGiven := config.AuthParams["key"] != "" + if certGiven && !keyGiven { + return errors.New("key must be provided with cert") + } + if keyGiven && !certGiven { + return errors.New("cert must be provided with key") + } + meta.ca = config.AuthParams["ca"] + meta.cert = config.AuthParams["cert"] + meta.key = config.AuthParams["key"] + if value, found := config.AuthParams["keyPassword"]; found { + meta.keyPassword = value + } else { + meta.keyPassword = "" + } + meta.enableTLS = true + } + + meta.saslType = KafkaSASLTypeNone + var saslAuthType string + switch { + case config.TriggerMetadata["sasl"] != "": + saslAuthType = config.TriggerMetadata["sasl"] + default: + saslAuthType = "" + } + if val, ok := config.AuthParams["sasl"]; ok { + if saslAuthType != "" { + return errors.New("unable to set `sasl` in both ScaledObject and TriggerAuthentication together") + } + saslAuthType = val + } + + if saslAuthType != "" { + saslAuthType = strings.TrimSpace(saslAuthType) + switch mode := kafkaSaslType(saslAuthType); mode { + case KafkaSASLTypeMskIam: + meta.saslType = mode + if val, ok := config.TriggerMetadata["awsEndpoint"]; ok { + meta.awsEndpoint = val + } + if !meta.enableTLS { + return errors.New("TLS is required for MSK") + } + if val, ok := config.TriggerMetadata["awsRegion"]; ok && val != "" { + meta.awsRegion = val + } else { + return errors.New("no awsRegion given") + } + auth, err := getAwsAuthorization(config.AuthParams, 
config.TriggerMetadata, config.ResolvedEnv) + if err != nil { + return err + } + meta.awsAuthorization = auth + case KafkaSASLTypePlaintext: + fallthrough + case KafkaSASLTypeSCRAMSHA256: + fallthrough + case KafkaSASLTypeSCRAMSHA512: + if val, ok := config.AuthParams["username"]; ok { + meta.username = strings.TrimSpace(val) + } else { + return errors.New("no username given") + } + if val, ok := config.AuthParams["password"]; ok { + meta.password = strings.TrimSpace(val) + } else { + return errors.New("no password given") + } + case KafkaSASLTypeOAuthbearer: + return errors.New("SASL/OAUTHBEARER is not implemented yet") + default: + return fmt.Errorf("err sasl type %q given", mode) + } + } + + return nil +} + +func parseApacheKafkaMetadata(config *ScalerConfig, logger logr.Logger) (apacheKafkaMetadata, error) { + meta := apacheKafkaMetadata{} + switch { + case config.TriggerMetadata["bootstrapServersFromEnv"] != "": + meta.bootstrapServers = strings.Split(config.ResolvedEnv[config.TriggerMetadata["bootstrapServersFromEnv"]], ",") + case config.TriggerMetadata["bootstrapServers"] != "": + meta.bootstrapServers = strings.Split(config.TriggerMetadata["bootstrapServers"], ",") + default: + return meta, errors.New("no bootstrapServers given") + } + + switch { + case config.TriggerMetadata["consumerGroupFromEnv"] != "": + meta.group = config.ResolvedEnv[config.TriggerMetadata["consumerGroupFromEnv"]] + case config.TriggerMetadata["consumerGroup"] != "": + meta.group = config.TriggerMetadata["consumerGroup"] + default: + return meta, errors.New("no consumer group given") + } + + switch { + case config.TriggerMetadata["topicFromEnv"] != "": + meta.topic = strings.Split(config.ResolvedEnv[config.TriggerMetadata["topicFromEnv"]], ",") + case config.TriggerMetadata["topic"] != "": + meta.topic = strings.Split(config.TriggerMetadata["topic"], ",") + default: + meta.topic = []string{} + logger.V(1).Info(fmt.Sprintf("consumer group %q has no topics specified, "+ + "will use all topics subscribed by the consumer group for scaling", meta.group)) + } + + meta.partitionLimitation = nil + partitionLimitationMetadata := strings.TrimSpace(config.TriggerMetadata["partitionLimitation"]) + if partitionLimitationMetadata != "" { + if meta.topic == nil || len(meta.topic) == 0 { + logger.V(1).Info("no specific topics set, ignoring partitionLimitation setting") + } else { + pattern := config.TriggerMetadata["partitionLimitation"] + parsed, err := kedautil.ParseInt32List(pattern) + if err != nil { + return meta, fmt.Errorf("error parsing in partitionLimitation '%s': %w", pattern, err) + } + meta.partitionLimitation = parsed + logger.V(0).Info(fmt.Sprintf("partition limit active '%s'", pattern)) + } + } + + meta.offsetResetPolicy = defaultOffsetResetPolicy + + if config.TriggerMetadata["offsetResetPolicy"] != "" { + policy := offsetResetPolicy(config.TriggerMetadata["offsetResetPolicy"]) + if policy != earliest && policy != latest { + return meta, fmt.Errorf("err offsetResetPolicy policy %q given", policy) + } + meta.offsetResetPolicy = policy + } + + meta.lagThreshold = defaultKafkaLagThreshold + + if val, ok := config.TriggerMetadata[lagThresholdMetricName]; ok { + t, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return meta, fmt.Errorf("error parsing %q: %w", lagThresholdMetricName, err) + } + if t <= 0 { + return meta, fmt.Errorf("%q must be positive number", lagThresholdMetricName) + } + meta.lagThreshold = t + } + + meta.activationLagThreshold = defaultKafkaActivationLagThreshold + + if val, ok := 
config.TriggerMetadata[activationLagThresholdMetricName]; ok { + t, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return meta, fmt.Errorf("error parsing %q: %w", activationLagThresholdMetricName, err) + } + if t < 0 { + return meta, fmt.Errorf("%q must be positive number", activationLagThresholdMetricName) + } + meta.activationLagThreshold = t + } + + if err := parseApacheKafkaAuthParams(config, &meta); err != nil { + return meta, err + } + + meta.allowIdleConsumers = false + if val, ok := config.TriggerMetadata["allowIdleConsumers"]; ok { + t, err := strconv.ParseBool(val) + if err != nil { + return meta, fmt.Errorf("error parsing allowIdleConsumers: %w", err) + } + meta.allowIdleConsumers = t + } + + meta.excludePersistentLag = false + if val, ok := config.TriggerMetadata["excludePersistentLag"]; ok { + t, err := strconv.ParseBool(val) + if err != nil { + return meta, fmt.Errorf("error parsing excludePersistentLag: %w", err) + } + meta.excludePersistentLag = t + } + + meta.scaleToZeroOnInvalidOffset = false + if val, ok := config.TriggerMetadata["scaleToZeroOnInvalidOffset"]; ok { + t, err := strconv.ParseBool(val) + if err != nil { + return meta, fmt.Errorf("error parsing scaleToZeroOnInvalidOffset: %w", err) + } + meta.scaleToZeroOnInvalidOffset = t + } + + meta.scalerIndex = config.ScalerIndex + return meta, nil +} + +func getApacheKafkaClient(ctx context.Context, metadata apacheKafkaMetadata, logger logr.Logger) (*kafka.Client, error) { + var saslMechanism sasl.Mechanism + var tlsConfig *tls.Config + var err error + + logger.V(4).Info(fmt.Sprintf("Kafka SASL type %s", metadata.saslType)) + if metadata.enableTLS { + tlsConfig, err = kedautil.NewTLSConfigWithPassword(metadata.cert, metadata.key, metadata.keyPassword, metadata.ca, false) + if err != nil { + return nil, err + } + } + + switch metadata.saslType { + case KafkaSASLTypeNone: + saslMechanism = nil + case KafkaSASLTypePlaintext: + saslMechanism = plain.Mechanism{ + Username: metadata.username, + Password: metadata.password, + } + case KafkaSASLTypeSCRAMSHA256: + saslMechanism, err = scram.Mechanism(scram.SHA256, metadata.username, metadata.password) + if err != nil { + return nil, err + } + case KafkaSASLTypeSCRAMSHA512: + saslMechanism, err = scram.Mechanism(scram.SHA512, metadata.username, metadata.password) + if err != nil { + return nil, err + } + case KafkaSASLTypeOAuthbearer: + return nil, errors.New("SASL/OAUTHBEARER is not implemented yet") + case KafkaSASLTypeMskIam: + cfg, err := getAwsConfig(ctx, metadata.awsRegion, metadata.awsAuthorization) + if err != nil { + return nil, err + } + + saslMechanism = aws_msk_iam_v2.NewMechanism(*cfg) + default: + return nil, fmt.Errorf("err sasl type %q given", metadata.saslType) + } + + transport := &kafka.Transport{ + TLS: tlsConfig, + SASL: saslMechanism, + } + client := kafka.Client{ + Addr: kafka.TCP(metadata.bootstrapServers...), + Transport: transport, + } + if err != nil { + return nil, fmt.Errorf("error creating kafka client: %w", err) + } + + return &client, nil +} + +func (s *apacheKafkaScaler) getTopicPartitions(ctx context.Context) (map[string][]int, error) { + metadata, err := s.client.Metadata(ctx, &kafka.MetadataRequest{ + Addr: s.client.Addr, + }) + if err != nil { + return nil, fmt.Errorf("error getting metadata: %w", err) + } + s.logger.V(4).Info(fmt.Sprintf("Listed topics %v", metadata.Topics)) + + if len(s.metadata.topic) == 0 { + // in case of empty topic name, we will get all topics that the consumer group is subscribed to + describeGrpReq := 
&kafka.DescribeGroupsRequest{ + Addr: s.client.Addr, + GroupIDs: []string{ + s.metadata.group, + }, + } + describeGrp, err := s.client.DescribeGroups(ctx, describeGrpReq) + if err != nil { + return nil, fmt.Errorf("error describing group: %w", err) + } + if len(describeGrp.Groups[0].Members) == 0 { + return nil, fmt.Errorf("no active members in group %s, group-state is %s", s.metadata.group, describeGrp.Groups[0].GroupState) + } + s.logger.V(4).Info(fmt.Sprintf("Described group %s with response %v", s.metadata.group, describeGrp)) + + result := make(map[string][]int) + for _, topic := range metadata.Topics { + partitions := make([]int, 0) + for _, partition := range topic.Partitions { + // if no partition limitations are specified, all partitions are considered + if (len(s.metadata.partitionLimitation) == 0) || + (len(s.metadata.partitionLimitation) > 0 && kedautil.Contains(s.metadata.partitionLimitation, int32(partition.ID))) { + partitions = append(partitions, partition.ID) + } + } + result[topic.Name] = partitions + } + return result, nil + } + result := make(map[string][]int) + for _, topic := range metadata.Topics { + partitions := make([]int, 0) + if kedautil.Contains(s.metadata.topic, topic.Name) { + for _, partition := range topic.Partitions { + if (len(s.metadata.partitionLimitation) == 0) || + (len(s.metadata.partitionLimitation) > 0 && kedautil.Contains(s.metadata.partitionLimitation, int32(partition.ID))) { + partitions = append(partitions, partition.ID) + } + } + } + result[topic.Name] = partitions + } + return result, nil +} + +func (s *apacheKafkaScaler) getConsumerOffsets(ctx context.Context, topicPartitions map[string][]int) (map[string]map[int]int64, error) { + response, err := s.client.OffsetFetch( + ctx, + &kafka.OffsetFetchRequest{ + GroupID: s.metadata.group, + Topics: topicPartitions, + }, + ) + if err != nil || response.Error != nil { + return nil, fmt.Errorf("error listing consumer group offset: %w", err) + } + consumerOffset := make(map[string]map[int]int64) + for topic, partitionsOffset := range response.Topics { + consumerOffset[topic] = make(map[int]int64) + for _, partition := range partitionsOffset { + consumerOffset[topic][partition.Partition] = partition.CommittedOffset + } + } + return consumerOffset, nil +} + +/* +getLagForPartition returns (lag, lagWithPersistent, error) + +When excludePersistentLag is set to `false` (default), lag will always be equal to lagWithPersistent +When excludePersistentLag is set to `true`, if partition is deemed to have persistent lag, lag will be set to 0 and lagWithPersistent will be latestOffset - consumerOffset +These return values will allow proper scaling from 0 -> 1 replicas by the IsActive func. +*/ +func (s *apacheKafkaScaler) getLagForPartition(topic string, partitionID int, consumerOffsets map[string]map[int]int64, producerOffsets map[string]map[int]int64) (int64, int64, error) { + if len(consumerOffsets) == 0 { + return 0, 0, fmt.Errorf("consumerOffsets is empty") + } + if len(producerOffsets) == 0 { + return 0, 0, fmt.Errorf("producerOffsets is empty") + } + + consumerOffset := consumerOffsets[topic][partitionID] + if consumerOffset == invalidOffset && s.metadata.offsetResetPolicy == latest { + retVal := int64(1) + if s.metadata.scaleToZeroOnInvalidOffset { + retVal = 0 + } + msg := fmt.Sprintf( + "invalid offset found for topic %s in group %s and partition %d, probably no offset is committed yet.
Returning with lag of %d", + topic, s.metadata.group, partitionID, retVal) + s.logger.V(1).Info(msg) + return retVal, retVal, nil + } + + if _, found := producerOffsets[topic]; !found { + return 0, 0, fmt.Errorf("error finding partition offset for topic %s", topic) + } + producerOffset := producerOffsets[topic][partitionID] + if consumerOffset == invalidOffset && s.metadata.offsetResetPolicy == earliest { + return producerOffset, producerOffset, nil + } + + // This code block tries to prevent KEDA Kafka trigger from scaling the scale target based on erroneous events + if s.metadata.excludePersistentLag { + switch previousOffset, found := s.previousOffsets[topic][partitionID]; { + case !found: + // No record of previous offset, so store current consumer offset + // Allow this consumer lag to be considered in scaling + if _, topicFound := s.previousOffsets[topic]; !topicFound { + s.previousOffsets[topic] = map[int]int64{partitionID: consumerOffset} + } else { + s.previousOffsets[topic][partitionID] = consumerOffset + } + case previousOffset == consumerOffset: + // Indicates consumer is still on the same offset as the previous polling cycle, there may be some issue with consuming this offset. + // return 0, so this consumer lag is not considered for scaling + return 0, producerOffset - consumerOffset, nil + default: + // Successfully Consumed some messages, proceed to change the previous offset + s.previousOffsets[topic][partitionID] = consumerOffset + } + } + + s.logger.V(4).Info(fmt.Sprintf("Consumer offset for topic %s in group %s and partition %d is %d", topic, s.metadata.group, partitionID, consumerOffset)) + s.logger.V(4).Info(fmt.Sprintf("Producer offset for topic %s in group %s and partition %d is %d", topic, s.metadata.group, partitionID, producerOffset)) + + return producerOffset - consumerOffset, producerOffset - consumerOffset, nil +} + +// Close closes the kafka client +func (s *apacheKafkaScaler) Close(context.Context) error { + if s.client == nil { + return nil + } + transport := s.client.Transport.(*kafka.Transport) + if transport != nil { + transport.CloseIdleConnections() + } + return nil +} + +func (s *apacheKafkaScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSpec { + var metricName string + if s.metadata.topic != nil && len(s.metadata.topic) > 0 { + metricName = fmt.Sprintf("kafka-%s", strings.Join(s.metadata.topic, ",")) + } else { + metricName = fmt.Sprintf("kafka-%s-topics", s.metadata.group) + } + + externalMetric := &v2.ExternalMetricSource{ + Metric: v2.MetricIdentifier{ + Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(metricName)), + }, + Target: GetMetricTarget(s.metricType, s.metadata.lagThreshold), + } + metricSpec := v2.MetricSpec{External: externalMetric, Type: kafkaMetricType} + return []v2.MetricSpec{metricSpec} +} + +type apacheKafkaConsumerOffsetResult struct { + consumerOffsets map[string]map[int]int64 + err error +} + +type apacheKafkaProducerOffsetResult struct { + producerOffsets map[string]map[int]int64 + err error +} + +// getConsumerAndProducerOffsets returns (consumerOffsets, producerOffsets, error) +func (s *apacheKafkaScaler) getConsumerAndProducerOffsets(ctx context.Context, topicPartitions map[string][]int) (map[string]map[int]int64, map[string]map[int]int64, error) { + consumerChan := make(chan apacheKafkaConsumerOffsetResult, 1) + go func() { + consumerOffsets, err := s.getConsumerOffsets(ctx, topicPartitions) + consumerChan <- apacheKafkaConsumerOffsetResult{consumerOffsets, err} + }() + + 
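// Fetch the latest produced offsets in parallel with the consumer group offsets requested above; both results are collected from their channels below. +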
producerChan := make(chan apacheKafkaProducerOffsetResult, 1) + go func() { + producerOffsets, err := s.getProducerOffsets(ctx, topicPartitions) + producerChan <- apacheKafkaProducerOffsetResult{producerOffsets, err} + }() + + consumerRes := <-consumerChan + if consumerRes.err != nil { + return nil, nil, consumerRes.err + } + + producerRes := <-producerChan + if producerRes.err != nil { + return nil, nil, producerRes.err + } + + return consumerRes.consumerOffsets, producerRes.producerOffsets, nil +} + +// GetMetricsAndActivity returns the value for a supported metric and an error if there is a problem getting the metric +func (s *apacheKafkaScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { + totalLag, totalLagWithPersistent, err := s.getTotalLag(ctx) + if err != nil { + return []external_metrics.ExternalMetricValue{}, false, err + } + metric := GenerateMetricInMili(metricName, float64(totalLag)) + + return []external_metrics.ExternalMetricValue{metric}, totalLagWithPersistent > s.metadata.activationLagThreshold, nil +} + +// getTotalLag returns totalLag, totalLagWithPersistent, error +// totalLag and totalLagWithPersistent are the summations of lag and lagWithPersistent returned by the getLagForPartition function, respectively. +// totalLag may be less than totalLagWithPersistent when excludePersistentLag is set to `true`, because some partitions may be deemed to have persistent lag +func (s *apacheKafkaScaler) getTotalLag(ctx context.Context) (int64, int64, error) { + topicPartitions, err := s.getTopicPartitions(ctx) + if err != nil { + return 0, 0, err + } + s.logger.V(4).Info(fmt.Sprintf("Kafka scaler: Topic partitions %v", topicPartitions)) + + consumerOffsets, producerOffsets, err := s.getConsumerAndProducerOffsets(ctx, topicPartitions) + s.logger.V(4).Info(fmt.Sprintf("Kafka scaler: Consumer offsets %v, producer offsets %v", consumerOffsets, producerOffsets)) + if err != nil { + return 0, 0, err + } + + totalLag := int64(0) + totalLagWithPersistent := int64(0) + totalTopicPartitions := int64(0) + + for topic, partitionsOffsets := range producerOffsets { + for partition := range partitionsOffsets { + lag, lagWithPersistent, err := s.getLagForPartition(topic, partition, consumerOffsets, producerOffsets) + if err != nil { + return 0, 0, err + } + totalLag += lag + totalLagWithPersistent += lagWithPersistent + } + totalTopicPartitions += (int64)(len(partitionsOffsets)) + } + s.logger.V(1).Info(fmt.Sprintf("Kafka scaler: Providing metrics based on totalLag %v, topicPartitions %v, threshold %v", totalLag, topicPartitions, s.metadata.lagThreshold)) + + s.logger.V(1).Info(fmt.Sprintf("Kafka scaler: Consumer offsets %v, producer offsets %v", consumerOffsets, producerOffsets)) + + if !s.metadata.allowIdleConsumers { + // don't scale out beyond the number of topicPartitions + if (totalLag / s.metadata.lagThreshold) > totalTopicPartitions { + totalLag = totalTopicPartitions * s.metadata.lagThreshold + } + } + return totalLag, totalLagWithPersistent, nil +} + +// getProducerOffsets returns the latest offsets for the given topic partitions +func (s *apacheKafkaScaler) getProducerOffsets(ctx context.Context, topicPartitions map[string][]int) (map[string]map[int]int64, error) { + // Step 1: build one OffsetRequest + offsetRequest := make(map[string][]kafka.OffsetRequest) + + for topic, partitions := range topicPartitions { + for _, partitionID := range partitions { + offsetRequest[topic] = append(offsetRequest[topic],
kafka.FirstOffsetOf(partitionID), kafka.LastOffsetOf(partitionID)) + } + } + + // Step 2: send request + res, err := s.client.ListOffsets(ctx, &kafka.ListOffsetsRequest{ + Addr: s.client.Addr, + Topics: offsetRequest, + }) + if err != nil { + return nil, err + } + + // Step 3: parse response and return + producerOffsets := make(map[string]map[int]int64) + for topic, partitionOffset := range res.Topics { + producerOffsets[topic] = make(map[int]int64) + for _, partition := range partitionOffset { + producerOffsets[topic][partition.Partition] = partition.LastOffset + } + } + + return producerOffsets, nil +} diff --git a/pkg/scalers/apache_kafka_scaler_test.go b/pkg/scalers/apache_kafka_scaler_test.go new file mode 100644 index 00000000000..1d7d5539ded --- /dev/null +++ b/pkg/scalers/apache_kafka_scaler_test.go @@ -0,0 +1,370 @@ +package scalers + +import ( + "context" + "fmt" + "reflect" + "testing" + + "github.com/go-logr/logr" +) + +type parseApacheKafkaMetadataTestData struct { + metadata map[string]string + isError bool + numBrokers int + brokers []string + group string + topic []string + partitionLimitation []int32 + offsetResetPolicy offsetResetPolicy + allowIdleConsumers bool + excludePersistentLag bool +} + +type parseApacheKafkaAuthParamsTestData struct { + authParams map[string]string + isError bool + enableTLS bool +} + +// Testing the case where `tls` and `sasl` are specified in ScaledObject +type parseApacheKafkaAuthParamsTestDataSecondAuthMethod struct { + metadata map[string]string + authParams map[string]string + isError bool + enableTLS bool +} + +type apacheKafkaMetricIdentifier struct { + metadataTestData *parseApacheKafkaMetadataTestData + scalerIndex int + name string +} + +// A complete valid metadata example for reference +var validApacheKafkaMetadata = map[string]string{ + "bootstrapServers": "broker1:9092,broker2:9092", + "consumerGroup": "my-group", + "topic": "my-topics", + "allowIdleConsumers": "false", +} + +// A complete valid authParams example for sasl, with username and passwd +var validApacheKafkaWithAuthParams = map[string]string{ + "sasl": "plaintext", + "username": "admin", + "password": "admin", +} + +// A complete valid authParams example for sasl, without username and passwd +var validApacheKafkaWithoutAuthParams = map[string]string{} + +var parseApacheKafkaMetadataTestDataset = []parseApacheKafkaMetadataTestData{ + // failure, no consumer group + {map[string]string{"bootstrapServers": "foobar:9092"}, true, 1, []string{"foobar:9092"}, "", nil, nil, "latest", false, false}, + // success, no topics + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group"}, false, 1, []string{"foobar:9092"}, "my-group", []string{}, nil, offsetResetPolicy("latest"), false, false}, + // success, ignore partitionLimitation if no topics + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "partitionLimitation": "1,2,3,4,5,6"}, false, 1, []string{"foobar:9092"}, "my-group", []string{}, nil, offsetResetPolicy("latest"), false, false}, + // success, no limitation with whitespaced limitation value + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "partitionLimitation": " "}, false, 1, []string{"foobar:9092"}, "my-group", []string{}, nil, offsetResetPolicy("latest"), false, false}, + // success, no limitation + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "partitionLimitation": ""}, false, 1, []string{"foobar:9092"}, "my-group", []string{}, 
nil, offsetResetPolicy("latest"), false, false}, + // failure, lagThreshold is negative value + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "lagThreshold": "-1"}, true, 1, []string{"foobar:9092"}, "my-group", []string{"my-topics"}, nil, offsetResetPolicy("latest"), false, false}, + // failure, lagThreshold is 0 + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "lagThreshold": "0"}, true, 1, []string{"foobar:9092"}, "my-group", []string{"my-topics"}, nil, offsetResetPolicy("latest"), false, false}, + // success, activationLagThreshold is 0 + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "lagThreshold": "10", "activationLagThreshold": "0"}, false, 1, []string{"foobar:9092"}, "my-group", []string{"my-topics"}, nil, offsetResetPolicy("latest"), false, false}, + // success + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics"}, false, 1, []string{"foobar:9092"}, "my-group", []string{"my-topics"}, nil, offsetResetPolicy("latest"), false, false}, + // success, partitionLimitation as list + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "partitionLimitation": "1,2,3,4"}, false, 1, []string{"foobar:9092"}, "my-group", []string{"my-topics"}, []int32{1, 2, 3, 4}, offsetResetPolicy("latest"), false, false}, + // success, partitionLimitation as range + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "partitionLimitation": "1-4"}, false, 1, []string{"foobar:9092"}, "my-group", []string{"my-topics"}, []int32{1, 2, 3, 4}, offsetResetPolicy("latest"), false, false}, + // success, partitionLimitation mixed list + ranges + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "partitionLimitation": "1-4,8,10-12"}, false, 1, []string{"foobar:9092"}, "my-group", []string{"my-topics"}, []int32{1, 2, 3, 4, 8, 10, 11, 12}, offsetResetPolicy("latest"), false, false}, + // failure, partitionLimitation wrong data type + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "partitionLimitation": "a,b,c,d"}, true, 1, []string{"foobar:9092"}, "my-group", []string{"my-topics"}, nil, offsetResetPolicy("latest"), false, false}, + // success, more brokers + {map[string]string{"bootstrapServers": "foo:9092,bar:9092", "consumerGroup": "my-group", "topic": "my-topics"}, false, 2, []string{"foo:9092", "bar:9092"}, "my-group", []string{"my-topics"}, nil, offsetResetPolicy("latest"), false, false}, + // success, offsetResetPolicy policy latest + {map[string]string{"bootstrapServers": "foo:9092,bar:9092", "consumerGroup": "my-group", "topic": "my-topics", "offsetResetPolicy": "latest"}, false, 2, []string{"foo:9092", "bar:9092"}, "my-group", []string{"my-topics"}, nil, offsetResetPolicy("latest"), false, false}, + // failure, offsetResetPolicy policy wrong + {map[string]string{"bootstrapServers": "foo:9092,bar:9092", "consumerGroup": "my-group", "topic": "my-topics", "offsetResetPolicy": "foo"}, true, 2, []string{"foo:9092", "bar:9092"}, "my-group", []string{"my-topics"}, nil, "", false, false}, + // success, offsetResetPolicy policy earliest + {map[string]string{"bootstrapServers": "foo:9092,bar:9092", "consumerGroup": "my-group", "topic": "my-topics", "offsetResetPolicy": "earliest"}, 
false, 2, []string{"foo:9092", "bar:9092"}, "my-group", []string{"my-topics"}, nil, offsetResetPolicy("earliest"), false, false}, + // failure, allowIdleConsumers malformed + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "notvalid"}, true, 1, []string{"foobar:9092"}, "my-group", []string{"my-topics"}, nil, offsetResetPolicy("latest"), false, false}, + // success, allowIdleConsumers is true + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, false, 1, []string{"foobar:9092"}, "my-group", []string{"my-topics"}, nil, offsetResetPolicy("latest"), true, false}, + // failure, excludePersistentLag is malformed + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "excludePersistentLag": "notvalid"}, true, 1, []string{"foobar:9092"}, "my-group", []string{"my-topics"}, nil, offsetResetPolicy("latest"), false, false}, + // success, excludePersistentLag is true + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "excludePersistentLag": "true"}, false, 1, []string{"foobar:9092"}, "my-group", []string{"my-topics"}, nil, offsetResetPolicy("latest"), false, true}, + // success, version supported + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, false, 1, []string{"foobar:9092"}, "my-group", []string{"my-topics"}, nil, offsetResetPolicy("latest"), true, false}, +} + +var parseApacheKafkaAuthParamsTestDataset = []parseApacheKafkaAuthParamsTestData{ + // success, SASL only + {map[string]string{"sasl": "plaintext", "username": "admin", "password": "admin"}, false, false}, + // success, SASL only + {map[string]string{"sasl": "scram_sha256", "username": "admin", "password": "admin"}, false, false}, + // success, SASL only + {map[string]string{"sasl": "scram_sha512", "username": "admin", "password": "admin"}, false, false}, + // success, TLS only + {map[string]string{"tls": "enable", "ca": "caaa", "cert": "ceert", "key": "keey"}, false, true}, + // success, TLS cert/key and assumed public CA + {map[string]string{"tls": "enable", "cert": "ceert", "key": "keey"}, false, true}, + // success, TLS cert/key + key password and assumed public CA + {map[string]string{"tls": "enable", "cert": "ceert", "key": "keey", "keyPassword": "keeyPassword"}, false, true}, + // success, TLS CA only + {map[string]string{"tls": "enable", "ca": "caaa"}, false, true}, + // success, SASL + TLS + {map[string]string{"sasl": "plaintext", "username": "admin", "password": "admin", "tls": "enable", "ca": "caaa", "cert": "ceert", "key": "keey"}, false, true}, + // success, SASL + TLS explicitly disabled + {map[string]string{"sasl": "plaintext", "username": "admin", "password": "admin", "tls": "disable"}, false, false}, + // failure, SASL incorrect type + {map[string]string{"sasl": "foo", "username": "admin", "password": "admin"}, true, false}, + // failure, SASL missing username + {map[string]string{"sasl": "plaintext", "password": "admin"}, true, false}, + // failure, SASL missing password + {map[string]string{"sasl": "plaintext", "username": "admin"}, true, false}, + // failure, TLS missing cert + {map[string]string{"tls": "enable", "ca": "caaa", "key": "keey"}, true, false}, + // failure, TLS missing key + {map[string]string{"tls": "enable", "ca": "caaa", "cert": "ceert"}, true, 
false}, + // failure, TLS invalid + {map[string]string{"tls": "yes", "ca": "caaa", "cert": "ceert", "key": "keey"}, true, false}, + // failure, SASL + TLS, incorrect sasl + {map[string]string{"sasl": "foo", "username": "admin", "password": "admin", "tls": "enable", "ca": "caaa", "cert": "ceert", "key": "keey"}, true, true}, + // failure, SASL + TLS, incorrect tls + {map[string]string{"sasl": "plaintext", "username": "admin", "password": "admin", "tls": "foo", "ca": "caaa", "cert": "ceert", "key": "keey"}, true, false}, + // failure, SASL + TLS, missing username + {map[string]string{"sasl": "plaintext", "password": "admin", "tls": "enable", "ca": "caaa", "cert": "ceert", "key": "keey"}, true, true}, + // failure, SASL + TLS, missing password + {map[string]string{"sasl": "plaintext", "username": "admin", "tls": "enable", "ca": "caaa", "cert": "ceert", "key": "keey"}, true, true}, + // failure, SASL + TLS, missing cert + {map[string]string{"sasl": "plaintext", "username": "admin", "password": "admin", "tls": "enable", "ca": "caaa", "key": "keey"}, true, true}, + // failure, SASL + TLS, missing key + {map[string]string{"sasl": "plaintext", "username": "admin", "password": "admin", "tls": "enable", "ca": "caaa", "cert": "ceert"}, true, true}, +} +var parseApacheKafkaAuthParamsTestDataset2 = []parseApacheKafkaAuthParamsTestDataSecondAuthMethod{ + // success, SASL plaintext + {map[string]string{"sasl": "plaintext", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"username": "admin", "password": "admin"}, false, false}, + // success, SASL scram_sha256 + {map[string]string{"sasl": "scram_sha256", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"username": "admin", "password": "admin"}, false, false}, + // success, SASL scram_sha512 + {map[string]string{"sasl": "scram_sha512", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"username": "admin", "password": "admin"}, false, false}, + // success, TLS only + {map[string]string{"tls": "enable", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"ca": "caaa", "cert": "ceert", "key": "keey"}, false, true}, + // success, TLS cert/key and assumed public CA + {map[string]string{"tls": "enable", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"cert": "ceert", "key": "keey"}, false, true}, + // success, TLS cert/key + key password and assumed public CA + {map[string]string{"tls": "enable", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"cert": "ceert", "key": "keey", "keyPassword": "keeyPassword"}, false, true}, + // success, TLS CA only + {map[string]string{"tls": "enable", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"ca": "caaa"}, false, true}, + // success, SASL + TLS + {map[string]string{"sasl": "plaintext", "tls": "enable", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"username": "admin", "password": "admin", "ca": "caaa", "cert": "ceert", "key": "keey"}, false, 
true}, + // success, SASL + TLS explicitly disabled + {map[string]string{"sasl": "plaintext", "tls": "disable", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"username": "admin", "password": "admin"}, false, false}, + // failure, SASL incorrect type + {map[string]string{"sasl": "foo", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"username": "admin", "password": "admin"}, true, false}, + // failure, SASL missing username + {map[string]string{"sasl": "plaintext", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"password": "admin"}, true, false}, + // failure, SASL missing password + {map[string]string{"sasl": "plaintext", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"username": "admin"}, true, false}, + // failure, TLS missing cert + {map[string]string{"tls": "enable", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"ca": "caaa", "key": "keey"}, true, false}, + // failure, TLS missing key + {map[string]string{"tls": "enable", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"ca": "caaa", "cert": "ceert"}, true, false}, + // failure, TLS invalid + {map[string]string{"tls": "random", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"ca": "caaa", "cert": "ceert", "key": "keey"}, true, false}, + // failure, SASL + TLS, incorrect SASL type + {map[string]string{"sasl": "foo", "tls": "enable", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"username": "admin", "password": "admin", "ca": "caaa", "cert": "ceert", "key": "keey"}, true, false}, + // failure, SASL + TLS, incorrect tls + {map[string]string{"sasl": "plaintext", "tls": "foo", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"username": "admin", "password": "admin", "ca": "caaa", "cert": "ceert", "key": "keey"}, true, false}, + // failure, SASL + TLS, missing username + {map[string]string{"sasl": "plaintext", "tls": "enable", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"password": "admin", "ca": "caaa", "cert": "ceert", "key": "keey"}, true, true}, + // failure, SASL + TLS, missing password + {map[string]string{"sasl": "plaintext", "tls": "enable", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"username": "admin", "ca": "caaa", "cert": "ceert", "key": "keey"}, true, true}, + // failure, SASL + TLS, missing cert + {map[string]string{"sasl": "plaintext", "tls": "enable", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"username": "admin", "password": "admin", "ca": "caaa", "key": "keey"}, true, true}, + // failure, SASL + TLS, missing key + {map[string]string{"sasl": "plaintext", "tls": "enable", 
"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"sasl": "plaintext", "username": "admin", "password": "admin", "ca": "caaa", "cert": "ceert"}, true, true}, + + // failure, setting SASL values in both places + {map[string]string{"sasl": "scram_sha512", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"sasl": "scram_sha512", "username": "admin", "password": "admin"}, true, false}, + // failure, setting TLS values in both places + {map[string]string{"tls": "enable", "bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"tls": "enable", "ca": "caaa", "cert": "ceert", "key": "keey"}, true, true}, + // success, setting SASL plaintext value with extra \n in TriggerAuthentication + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"sasl": "plaintext\n", "username": "admin", "password": "admin"}, false, true}, + // success, setting SASL plaintext value with extra space in TriggerAuthentication + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"sasl": "plaintext ", "username": "admin", "password": "admin"}, false, true}, + // success, setting SASL scram_sha256 value with extra \n in TriggerAuthentication + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"sasl": "scram_sha256\n", "username": "admin", "password": "admin"}, false, true}, + // success, setting SASL scram_sha256 value with extra space in TriggerAuthentication + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"sasl": "scram_sha256 ", "username": "admin", "password": "admin"}, false, true}, + // success, setting SASL scram_sha512 value with extra \n in TriggerAuthentication + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"sasl": "scram_sha512\n", "username": "admin", "password": "admin"}, false, true}, + // success, setting SASL scram_sha512 value with extra space in TriggerAuthentication + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"sasl": "scram_sha512 ", "username": "admin", "password": "admin"}, false, true}, + // success, setting SASL aws_msk_iam with tls enabled and passing credentials + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true", "awsRegion": "us-east-1"}, map[string]string{"tls": "enable", "sasl": "aws_msk_iam", "awsAccessKeyID": "none", "awsSecretAccessKey": "none"}, false, true}, + // failure, setting SASL aws_msk_iam with tls enabled and missing awsRegion + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true"}, map[string]string{"tls": "enable", "sasl": "aws_msk_iam", "awsAccessKeyID": "none", "awsSecretAccessKey": "none"}, true, true}, + // failure, setting SASL aws_msk_iam with 
tls disabled + {map[string]string{"bootstrapServers": "foobar:9092", "consumerGroup": "my-group", "topic": "my-topics", "allowIdleConsumers": "true", "awsRegion": "us-east-1"}, map[string]string{"sasl": "aws_msk_iam", "awsAccessKeyID": "none", "awsSecretAccessKey": "none"}, true, false}, +} + +var apacheKafkaMetricIdentifiers = []apacheKafkaMetricIdentifier{ + {&parseApacheKafkaMetadataTestDataset[10], 0, "s0-kafka-my-topics"}, + {&parseApacheKafkaMetadataTestDataset[10], 1, "s1-kafka-my-topics"}, + {&parseApacheKafkaMetadataTestDataset[2], 1, "s1-kafka-my-group-topics"}, +} + +func TestApacheKafkaGetBrokers(t *testing.T) { + for _, testData := range parseApacheKafkaMetadataTestDataset { + meta, err := parseApacheKafkaMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: validApacheKafkaWithAuthParams}, logr.Discard()) + + if err != nil && !testData.isError { + t.Error("Expected success but got error", err) + } + if testData.isError && err == nil { + t.Error("Expected error but got success") + } + if len(meta.bootstrapServers) != testData.numBrokers { + t.Errorf("Expected %d bootstrap servers but got %d\n", testData.numBrokers, len(meta.bootstrapServers)) + } + if !reflect.DeepEqual(testData.brokers, meta.bootstrapServers) { + t.Errorf("Expected %#v but got %#v\n", testData.brokers, meta.bootstrapServers) + } + if meta.group != testData.group { + t.Errorf("Expected group %s but got %s\n", testData.group, meta.group) + } + if !reflect.DeepEqual(testData.topic, meta.topic) { + t.Errorf("Expected topics %#v but got %#v\n", testData.topic, meta.topic) + } + if !reflect.DeepEqual(testData.partitionLimitation, meta.partitionLimitation) { + t.Errorf("Expected %#v but got %#v\n", testData.partitionLimitation, meta.partitionLimitation) + } + if err == nil && meta.offsetResetPolicy != testData.offsetResetPolicy { + t.Errorf("Expected offsetResetPolicy %s but got %s\n", testData.offsetResetPolicy, meta.offsetResetPolicy) + } + + meta, err = parseApacheKafkaMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: validApacheKafkaWithoutAuthParams}, logr.Discard()) + + if err != nil && !testData.isError { + t.Error("Expected success but got error", err) + } + if testData.isError && err == nil { + t.Error("Expected error but got success") + } + if len(meta.bootstrapServers) != testData.numBrokers { + t.Errorf("Expected %d bootstrap servers but got %d\n", testData.numBrokers, len(meta.bootstrapServers)) + } + if !reflect.DeepEqual(testData.brokers, meta.bootstrapServers) { + t.Errorf("Expected %#v but got %#v\n", testData.brokers, meta.bootstrapServers) + } + if meta.group != testData.group { + t.Errorf("Expected group %s but got %s\n", testData.group, meta.group) + } + if !reflect.DeepEqual(testData.topic, meta.topic) { + t.Errorf("Expected topics %#v but got %#v\n", testData.topic, meta.topic) + } + if !reflect.DeepEqual(testData.partitionLimitation, meta.partitionLimitation) { + t.Errorf("Expected %#v but got %#v\n", testData.partitionLimitation, meta.partitionLimitation) + } + if err == nil && meta.offsetResetPolicy != testData.offsetResetPolicy { + t.Errorf("Expected offsetResetPolicy %s but got %s\n", testData.offsetResetPolicy, meta.offsetResetPolicy) + } + if err == nil && meta.allowIdleConsumers != testData.allowIdleConsumers { + t.Errorf("Expected allowIdleConsumers %t but got %t\n", testData.allowIdleConsumers, meta.allowIdleConsumers) + } + if err == nil && meta.excludePersistentLag != testData.excludePersistentLag { + t.Errorf("Expected excludePersistentLag %t 
but got %t\n", testData.excludePersistentLag, meta.excludePersistentLag) + } + } +} + +func TestApacheKafkaAuthParams(t *testing.T) { + // Testing tls and sasl value in TriggerAuthentication + for _, testData := range parseApacheKafkaAuthParamsTestDataset { + meta, err := parseApacheKafkaMetadata(&ScalerConfig{TriggerMetadata: validApacheKafkaMetadata, AuthParams: testData.authParams}, logr.Discard()) + + if err != nil && !testData.isError { + t.Error("Expected success but got error", err) + } + if testData.isError && err == nil { + t.Error("Expected error but got success") + } + // we can ignore what tls is set if there is error + if err == nil && meta.enableTLS != testData.enableTLS { + t.Errorf("Expected enableTLS to be set to %#v but got %#v\n", testData.enableTLS, meta.enableTLS) + } + if err == nil && meta.enableTLS { + if meta.ca != testData.authParams["ca"] { + t.Errorf("Expected ca to be set to %#v but got %#v\n", testData.authParams["ca"], meta.ca) + } + if meta.cert != testData.authParams["cert"] { + t.Errorf("Expected cert to be set to %#v but got %#v\n", testData.authParams["cert"], meta.cert) + } + if meta.key != testData.authParams["key"] { + t.Errorf("Expected key to be set to %#v but got %#v\n", testData.authParams["key"], meta.key) + } + if meta.keyPassword != testData.authParams["keyPassword"] { + t.Errorf("Expected key to be set to %#v but got %#v\n", testData.authParams["keyPassword"], meta.key) + } + } + } + + // Testing tls and sasl value in scaledObject + for id, testData := range parseApacheKafkaAuthParamsTestDataset2 { + meta, err := parseApacheKafkaMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams}, logr.Discard()) + + if err != nil && !testData.isError { + t.Errorf("Test case: %#v. Expected success but got error %#v", id, err) + } + if testData.isError && err == nil { + t.Errorf("Test case: %#v. Expected error but got success", id) + } + if !testData.isError { + if testData.metadata["tls"] == stringTrue && !meta.enableTLS { + t.Errorf("Test case: %#v. Expected tls to be set to %#v but got %#v\n", id, testData.metadata["tls"], meta.enableTLS) + } + if meta.enableTLS { + if meta.ca != testData.authParams["ca"] { + t.Errorf("Test case: %#v. Expected ca to be set to %#v but got %#v\n", id, testData.authParams["ca"], meta.ca) + } + if meta.cert != testData.authParams["cert"] { + t.Errorf("Test case: %#v. Expected cert to be set to %#v but got %#v\n", id, testData.authParams["cert"], meta.cert) + } + if meta.key != testData.authParams["key"] { + t.Errorf("Test case: %#v. Expected key to be set to %#v but got %#v\n", id, testData.authParams["key"], meta.key) + } + if meta.keyPassword != testData.authParams["keyPassword"] { + t.Errorf("Test case: %#v. 
Expected key to be set to %#v but got %#v\n", id, testData.authParams["keyPassword"], meta.keyPassword) + } + } + } + } +} + +func TestApacheKafkaGetMetricSpecForScaling(t *testing.T) { + for _, testData := range apacheKafkaMetricIdentifiers { + meta, err := parseApacheKafkaMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: validApacheKafkaWithAuthParams, ScalerIndex: testData.scalerIndex}, logr.Discard()) + if err != nil { + t.Fatal("Could not parse metadata:", err) + } + mockKafkaScaler := apacheKafkaScaler{"", meta, nil, logr.Discard(), make(map[string]map[int]int64)} + + metricSpec := mockKafkaScaler.GetMetricSpecForScaling(context.Background()) + metricName := metricSpec[0].External.Metric.Name + if metricName != testData.name { + str := fmt.Sprintf("Wrong External metric source name: %s, expected: %s for %#v\n", metricName, testData.name, testData) + t.Error("Wrong External metric source name:", metricName, str) + } + } +} diff --git a/pkg/scalers/kafka_scaler.go b/pkg/scalers/kafka_scaler.go index 07efd729b59..e7f0266ea24 100644 --- a/pkg/scalers/kafka_scaler.go +++ b/pkg/scalers/kafka_scaler.go @@ -1,3 +1,22 @@ +/* +Copyright 2023 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This scaler is based on sarama library. +// It lacks support for AWS MSK. For AWS MSK please see: apache-kafka scaler. + package scalers import ( diff --git a/pkg/scaling/scalers_builder.go b/pkg/scaling/scalers_builder.go index 13175790aad..b9f1a23f2e1 100644 --- a/pkg/scaling/scalers_builder.go +++ b/pkg/scaling/scalers_builder.go @@ -107,6 +107,8 @@ func buildScaler(ctx context.Context, client client.Client, triggerType string, switch triggerType { case "activemq": return scalers.NewActiveMQScaler(config) + case "apache-kafka": + return scalers.NewApacheKafkaScaler(ctx, config) case "arangodb": return scalers.NewArangoDBScaler(config) case "artemis-queue": diff --git a/pkg/util/helpers.go b/pkg/util/helpers.go new file mode 100644 index 00000000000..03ca78242bf --- /dev/null +++ b/pkg/util/helpers.go @@ -0,0 +1,27 @@ +/* +Copyright 2023 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +// Contains checks if a slice contains a given element. 
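+// It performs a linear scan, so each call costs O(len(s)); for repeated
+// membership tests on large slices, a map[T]struct{} lookup is the usual
+// alternative.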
+func Contains[T comparable](s []T, e T) bool { + for _, v := range s { + if v == e { + return true + } + } + return false +} diff --git a/pkg/util/helpers_test.go b/pkg/util/helpers_test.go new file mode 100644 index 00000000000..780715210cc --- /dev/null +++ b/pkg/util/helpers_test.go @@ -0,0 +1,204 @@ +/* +Copyright 2023 The KEDA Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import "testing" + +func TestContains(t *testing.T) { + type args[TP comparable] struct { + s []TP + e TP + } + + type person struct { + Name string + Age int + } + + intTests := []struct { + name string + args args[int] + want bool + }{ + { + name: "int slice contains element", + args: args[int]{ + s: []int{1, 2, 3, 4, 5}, + e: 3, + }, + want: true, + }, + { + name: "int slice does not contain element", + args: args[int]{ + s: []int{1, 2, 3, 4, 5}, + e: 6, + }, + want: false, + }, + { + name: "empty int slice does not contain element", + args: args[int]{ + s: []int{}, + e: 6, + }, + want: false, + }, + } + + stringTests := []struct { + name string + args args[string] + want bool + }{ + { + name: "string slice contains element", + args: args[string]{ + s: []string{"a", "b", "c", "d", "e"}, + e: "c", + }, + want: true, + }, + { + name: "string slice does not contain element", + args: args[string]{ + s: []string{"a", "b", "c", "d", "e"}, + e: "f", + }, + want: false, + }, + { + name: "empty string slice does not contain element", + args: args[string]{ + s: []string{}, + e: "f", + }, + want: false, + }, + { + name: "string slice contains empty string", + args: args[string]{ + s: []string{"a", "b", "c", "d", "e"}, + e: "", + }, + want: false, + }, + } + + personTests := []struct { + name string + args args[person] + want bool + }{ + { + name: "person slice contains element", + args: args[person]{ + s: []person{ + { + Name: "John", + Age: 30, + }, + { + Name: "Jane", + Age: 25, + }, + { + Name: "Bob", + Age: 40, + }, + }, + e: person{ + Name: "Jane", + Age: 25, + }, + }, + want: true, + }, + { + name: "person slice does not contain element", + args: args[person]{ + s: []person{ + { + Name: "John", + Age: 30, + }, + { + Name: "Jane", + Age: 25, + }, + { + Name: "Bob", + Age: 40, + }, + }, + e: person{ + Name: "Alice", + Age: 20, + }, + }, + want: false, + }, + { + name: "slice does not fully match", + args: args[person]{ + s: []person{ + { + Name: "John", + Age: 30, + }, + { + Name: "Jane", + Age: 25, + }, + { + Name: "Bob", + Age: 40, + }, + }, + e: person{ + Name: "Jane", + Age: 30, + }, + }, + want: false, + }, + } + + for _, tt := range intTests { + t.Run(tt.name, func(t *testing.T) { + if got := Contains(tt.args.s, tt.args.e); got != tt.want { + t.Errorf("Contains() = %v, want %v", got, tt.want) + } + }) + } + + for _, tt := range stringTests { + t.Run(tt.name, func(t *testing.T) { + if got := Contains(tt.args.s, tt.args.e); got != tt.want { + t.Errorf("Contains() = %v, want %v", got, tt.want) + } + }) + } + + for _, tt := range personTests { + t.Run(tt.name, func(t *testing.T) { + if got := 
Contains(tt.args.s, tt.args.e); got != tt.want { + t.Errorf("Contains() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/tests/helper/helper.go b/tests/helper/helper.go index f300caaa624..d5216a84bac 100644 --- a/tests/helper/helper.go +++ b/tests/helper/helper.go @@ -55,6 +55,10 @@ const ( StringFalse = "false" StringTrue = "true" + + StrimziVersion = "0.35.0" + StrimziChartName = "strimzi" + StrimziNamespace = "strimzi" ) const ( diff --git a/tests/scalers/apache_kafka/apache_kafka_test.go b/tests/scalers/apache_kafka/apache_kafka_test.go new file mode 100644 index 00000000000..c5cb6524f7c --- /dev/null +++ b/tests/scalers/apache_kafka/apache_kafka_test.go @@ -0,0 +1,591 @@ +//go:build e2e +// +build e2e + +package apache_kafka_test + +import ( + "fmt" + "strconv" + "testing" + + "github.com/joho/godotenv" + "github.com/stretchr/testify/assert" + "k8s.io/client-go/kubernetes" + + . "github.com/kedacore/keda/v2/tests/helper" +) + +// Load environment variables from .env file +var _ = godotenv.Load("../../.env") + +const ( + testName = "apache-kafka-test" +) + +var ( + testNamespace = fmt.Sprintf("%s-ns", testName) + deploymentName = fmt.Sprintf("%s-deployment", testName) + kafkaName = fmt.Sprintf("%s-kafka", testName) + kafkaClientName = fmt.Sprintf("%s-client", testName) + scaledObjectName = fmt.Sprintf("%s-so", testName) + bootstrapServer = fmt.Sprintf("%s-kafka-bootstrap.%s:9092", kafkaName, testNamespace) + topic1 = "kafka-topic" + topic2 = "kafka-topic2" + zeroInvalidOffsetTopic = "kafka-topic-zero-invalid-offset" + oneInvalidOffsetTopic = "kafka-topic-one-invalid-offset" + invalidOffsetGroup = "invalidOffset" + persistentLagTopic = "kafka-topic-persistent-lag" + persistentLagGroup = "persistentLag" + persistentLagDeploymentGroup = "persistentLagDeploymentGroup" + topicPartitions = 3 +) + +type templateData struct { + TestNamespace string + DeploymentName string + ScaledObjectName string + KafkaName string + KafkaTopicName string + KafkaTopicPartitions int + KafkaClientName string + TopicName string + Topic1Name string + Topic2Name string + BootstrapServer string + ResetPolicy string + Params string + Commit string + ScaleToZeroOnInvalid string + ExcludePersistentLag string +} + +const ( + singleDeploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + replicas: 0 + selector: + matchLabels: + app: kafka-consumer + template: + metadata: + labels: + app: kafka-consumer + spec: + containers: + # only recent version of kafka-console-consumer support flag "include" + # old version's equiv flag will violate language-matters commit hook + # work around -> create two consumer container joining the same group + - name: kafka-consumer + image: confluentinc/cp-kafka:5.2.1 + command: + - sh + - -c + - "kafka-console-consumer --bootstrap-server {{.BootstrapServer}} {{.Params}} --consumer-property enable.auto.commit={{.Commit}}" +` + + multiDeploymentTemplate = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{.DeploymentName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + replicas: 0 + selector: + matchLabels: + app: kafka-consumer + template: + metadata: + labels: + app: kafka-consumer + spec: + containers: + # only recent version of kafka-console-consumer support flag "include" + # old version's equiv flag will violate language-matters commit hook + # work around -> create two consumer container 
joining the same group + - name: kafka-consumer + image: confluentinc/cp-kafka:5.2.1 + command: + - sh + - -c + - "kafka-console-consumer --bootstrap-server {{.BootstrapServer}} --topic '{{.Topic1Name}}' --group multiTopic --from-beginning --consumer-property enable.auto.commit=false" + - name: kafka-consumer-2 + image: confluentinc/cp-kafka:5.2.1 + command: + - sh + - -c + - "kafka-console-consumer --bootstrap-server {{.BootstrapServer}} --topic '{{.Topic2Name}}' --group multiTopic --from-beginning --consumer-property enable.auto.commit=false" +` + + singleScaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + pollingInterval: 5 + cooldownPeriod: 0 + scaleTargetRef: + name: {{.DeploymentName}} + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleUp: + stabilizationWindowSeconds: 0 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + scaleDown: + stabilizationWindowSeconds: 0 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + triggers: + - type: apache-kafka + metadata: + topic: {{.TopicName}} + bootstrapServers: {{.BootstrapServer}} + consumerGroup: {{.ResetPolicy}} + lagThreshold: '1' + activationLagThreshold: '1' + offsetResetPolicy: {{.ResetPolicy}}` + + multiScaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + pollingInterval: 5 + cooldownPeriod: 0 + scaleTargetRef: + name: {{.DeploymentName}} + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleUp: + stabilizationWindowSeconds: 0 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + scaleDown: + stabilizationWindowSeconds: 0 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + triggers: + - type: apache-kafka + metadata: + topic: {{.TopicName}} + bootstrapServers: {{.BootstrapServer}} + consumerGroup: multiTopic + lagThreshold: '1' + offsetResetPolicy: 'latest'` + + invalidOffsetScaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + pollingInterval: 5 + cooldownPeriod: 0 + scaleTargetRef: + name: {{.DeploymentName}} + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleUp: + stabilizationWindowSeconds: 0 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + scaleDown: + stabilizationWindowSeconds: 0 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + triggers: + - type: apache-kafka + metadata: + topic: {{.TopicName}} + bootstrapServers: {{.BootstrapServer}} + consumerGroup: {{.ResetPolicy}} + lagThreshold: '1' + scaleToZeroOnInvalidOffset: '{{.ScaleToZeroOnInvalid}}' + offsetResetPolicy: 'latest'` + + persistentLagScaledObjectTemplate = ` +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{.ScaledObjectName}} + namespace: {{.TestNamespace}} + labels: + app: {{.DeploymentName}} +spec: + pollingInterval: 5 + cooldownPeriod: 0 + scaleTargetRef: + name: {{.DeploymentName}} + advanced: + horizontalPodAutoscalerConfig: + behavior: + scaleUp: + stabilizationWindowSeconds: 0 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + scaleDown: + stabilizationWindowSeconds: 0 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + triggers: + - type: apache-kafka + metadata: + topic: 
{{.TopicName}} + bootstrapServers: {{.BootstrapServer}} + consumerGroup: {{.ResetPolicy}} + lagThreshold: '1' + excludePersistentLag: '{{.ExcludePersistentLag}}' + offsetResetPolicy: 'latest'` + + kafkaClusterTemplate = `apiVersion: kafka.strimzi.io/v1beta2 +kind: Kafka +metadata: + name: {{.KafkaName}} + namespace: {{.TestNamespace}} +spec: + kafka: + version: "3.4.0" + replicas: 1 + listeners: + - name: plain + port: 9092 + type: internal + tls: false + - name: tls + port: 9093 + type: internal + tls: true + config: + offsets.topic.replication.factor: 1 + transaction.state.log.replication.factor: 1 + transaction.state.log.min.isr: 1 + log.message.format.version: "2.5" + storage: + type: ephemeral + zookeeper: + replicas: 1 + storage: + type: ephemeral + entityOperator: + topicOperator: {} + userOperator: {} +` + + kafkaTopicTemplate = `apiVersion: kafka.strimzi.io/v1beta2 +kind: KafkaTopic +metadata: + name: {{.KafkaTopicName}} + namespace: {{.TestNamespace}} + labels: + strimzi.io/cluster: {{.KafkaName}} + namespace: {{.TestNamespace}} +spec: + partitions: {{.KafkaTopicPartitions}} + replicas: 1 + config: + retention.ms: 604800000 + segment.bytes: 1073741824 +` + kafkaClientTemplate = ` +apiVersion: v1 +kind: Pod +metadata: + name: {{.KafkaClientName}} + namespace: {{.TestNamespace}} +spec: + containers: + - name: {{.KafkaClientName}} + image: confluentinc/cp-kafka:5.2.1 + command: + - sh + - -c + - "exec tail -f /dev/null"` +) + +func TestScaler(t *testing.T) { + // setup + t.Log("--- setting up ---") + // Create kubernetes resources + kc := GetKubernetesClient(t) + data, templates := getTemplateData() + CreateKubernetesResources(t, kc, testNamespace, data, templates) + defer DeleteKubernetesResources(t, testNamespace, data, templates) + addCluster(t, data) + addTopic(t, data, topic1, topicPartitions) + addTopic(t, data, topic2, topicPartitions) + addTopic(t, data, zeroInvalidOffsetTopic, 1) + addTopic(t, data, oneInvalidOffsetTopic, 1) + addTopic(t, data, persistentLagTopic, topicPartitions) + + // test scaling + testEarliestPolicy(t, kc, data) + testLatestPolicy(t, kc, data) + testMultiTopic(t, kc, data) + testZeroOnInvalidOffset(t, kc, data) + testOneOnInvalidOffset(t, kc, data) + testPersistentLag(t, kc, data) +} + +func testEarliestPolicy(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing earliest policy: scale out ---") + data.Params = fmt.Sprintf("--topic %s --group earliest --from-beginning", topic1) + data.Commit = StringFalse + data.TopicName = topic1 + data.ResetPolicy = "earliest" + KubectlApplyWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) + defer KubectlDeleteWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) + KubectlApplyWithTemplate(t, data, "singleScaledObjectTemplate", singleScaledObjectTemplate) + defer KubectlDeleteWithTemplate(t, data, "singleScaledObjectTemplate", singleScaledObjectTemplate) + + // Shouldn't scale pods applying earliest policy + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30) + + // Shouldn't scale pods with only 1 message due to activation value + publishMessage(t, topic1) + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30) + + // Scale application with kafka messages + publishMessage(t, topic1) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 2, 60, 2), + "replica count should be %d after 2 minute", 2) + + // Scale application beyond 
partition max. + messages := 5 + for i := 0; i < messages; i++ { + publishMessage(t, topic1) + } + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, topicPartitions, 60, 2), + "replica count should be %d after 2 minute", messages) +} + +func testLatestPolicy(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing latest policy: scale out ---") + commitPartition(t, topic1, "latest") + data.Params = fmt.Sprintf("--topic %s --group latest", topic1) + data.Commit = StringFalse + data.TopicName = topic1 + data.ResetPolicy = "latest" + KubectlApplyWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) + defer KubectlDeleteWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) + KubectlApplyWithTemplate(t, data, "singleScaledObjectTemplate", singleScaledObjectTemplate) + defer KubectlDeleteWithTemplate(t, data, "singleScaledObjectTemplate", singleScaledObjectTemplate) + + // Shouldn't scale pods + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30) + + // Shouldn't scale pods with only 1 message due to activation value + publishMessage(t, topic1) + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30) + + // Scale application with kafka messages + publishMessage(t, topic1) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 2, 60, 2), + "replica count should be %d after 2 minute", 2) + + // Scale application beyond partition max. + messages := 5 + for i := 0; i < messages; i++ { + publishMessage(t, topic1) + } + + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, topicPartitions, 60, 2), + "replica count should be %d after 2 minute", messages) +} + +func testMultiTopic(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing multi topic: scale out ---") + commitPartition(t, topic1, "multiTopic") + commitPartition(t, topic2, "multiTopic") + data.TopicName = fmt.Sprintf("%s,%s", topic1, topic2) + KubectlApplyWithTemplate(t, data, "multiDeploymentTemplate", multiDeploymentTemplate) + defer KubectlDeleteWithTemplate(t, data, "multiDeploymentTemplate", multiDeploymentTemplate) + KubectlApplyWithTemplate(t, data, "multiScaledObjectTemplate", multiScaledObjectTemplate) + defer KubectlDeleteWithTemplate(t, data, "multiScaledObjectTemplate", multiScaledObjectTemplate) + + // Shouldn't scale pods + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30) + + // Scale application with kafka messages in topic 1 + publishMessage(t, topic1) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 2), + "replica count should be %d after 2 minute", 1) + + // Scale application with kafka messages in topic 2 + // // produce one more msg to the different topic within the same group + // // will turn total consumer group lag to 2. 
+ // // with lagThreshold of 1 -> making the HPA AverageValue 1 + // // this should turn the number of replicas to 2 + // // as desiredReplicaCount = totalLag / avgThreshold + publishMessage(t, topic2) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 2, 60, 2), + "replica count should be %d after 2 minute", 2) +} + +func testZeroOnInvalidOffset(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing zeroInvalidOffsetTopic: scale out ---") + data.Params = fmt.Sprintf("--topic %s --group %s", zeroInvalidOffsetTopic, invalidOffsetGroup) + data.Commit = StringTrue + data.TopicName = zeroInvalidOffsetTopic + data.ResetPolicy = invalidOffsetGroup + data.ScaleToZeroOnInvalid = StringTrue + KubectlApplyWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) + defer KubectlDeleteWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) + KubectlApplyWithTemplate(t, data, "invalidOffsetScaledObjectTemplate", invalidOffsetScaledObjectTemplate) + defer KubectlDeleteWithTemplate(t, data, "invalidOffsetScaledObjectTemplate", invalidOffsetScaledObjectTemplate) + + // Shouldn't scale pods + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30) +} + +func testOneOnInvalidOffset(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing oneInvalidOffsetTopic: scale out ---") + data.Params = fmt.Sprintf("--topic %s --group %s --from-beginning", oneInvalidOffsetTopic, invalidOffsetGroup) + data.Commit = StringTrue + data.TopicName = oneInvalidOffsetTopic + data.ResetPolicy = invalidOffsetGroup + data.ScaleToZeroOnInvalid = StringFalse + KubectlApplyWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) + defer KubectlDeleteWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) + KubectlApplyWithTemplate(t, data, "invalidOffsetScaledObjectTemplate", invalidOffsetScaledObjectTemplate) + defer KubectlDeleteWithTemplate(t, data, "invalidOffsetScaledObjectTemplate", invalidOffsetScaledObjectTemplate) + + // Should scale to 1 + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 2), + "replica count should be %d after 2 minute", 1) + + commitPartition(t, oneInvalidOffsetTopic, invalidOffsetGroup) + publishMessage(t, oneInvalidOffsetTopic) + + // Should scale to 0 + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 10), + "replica count should be %d after 10 minute", 0) +} + +func publishMessage(t *testing.T, topic string) { + _, _, err := ExecCommandOnSpecificPod(t, kafkaClientName, testNamespace, fmt.Sprintf(`echo '{"text": "foo"}' | kafka-console-producer --broker-list %s --topic %s`, bootstrapServer, topic)) + assert.NoErrorf(t, err, "cannot execute command - %s", err) +} + +func commitPartition(t *testing.T, topic string, group string) { + _, _, err := ExecCommandOnSpecificPod(t, kafkaClientName, testNamespace, fmt.Sprintf(`kafka-console-consumer --bootstrap-server %s --topic %s --group %s --from-beginning --consumer-property enable.auto.commit=true --timeout-ms 15000`, bootstrapServer, topic, group)) + assert.NoErrorf(t, err, "cannot execute command - %s", err) +} + +func testPersistentLag(t *testing.T, kc *kubernetes.Clientset, data templateData) { + t.Log("--- testing persistentLag: no scale out ---") + + // Simulate consumption from the topic by the consumer group + // To avoid an edge case where scaling could be effectively disabled 
(Consumer never makes a commit) + data.Params = fmt.Sprintf("--topic %s --group %s --from-beginning", persistentLagTopic, persistentLagGroup) + data.Commit = StringTrue + data.TopicName = persistentLagTopic + data.ResetPolicy = persistentLagGroup + data.ExcludePersistentLag = StringTrue + KubectlApplyWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) + KubectlApplyWithTemplate(t, data, "persistentLagScaledObjectTemplate", persistentLagScaledObjectTemplate) + + // Scale application with kafka messages in persistentLagTopic + publishMessage(t, persistentLagTopic) + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 2), + "replica count should be %d after 2 minute", 1) + // Recreate Deployment to deliberately assign a different consumer group to the deployment and the scaled object + // This is to simulate inability to consume from the topic + // Scaled Object remains unchanged + KubernetesScaleDeployment(t, kc, deploymentName, 0, testNamespace) + assert.True(t, WaitForPodsTerminated(t, kc, "app=kafka-consumer", testNamespace, 60, 2), + "pod should be terminated after %d minute", 2) + + data.Params = fmt.Sprintf("--topic %s --group %s --from-beginning", persistentLagTopic, persistentLagDeploymentGroup) + KubectlApplyWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) + + messages := 5 + for i := 0; i < messages; i++ { + publishMessage(t, persistentLagTopic) + } + + // Persistent Lag should not scale pod above minimum replicas after 2 reconciliation cycles + assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 2), + "replica count should be %d after 2 minute", 1) + + // Shouldn't scale pods + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 1, 30) + + KubectlDeleteWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) + KubectlDeleteWithTemplate(t, data, "persistentLagScaledObjectTemplate", persistentLagScaledObjectTemplate) +} + +func addTopic(t *testing.T, data templateData, name string, partitions int) { + t.Log("--- adding kafka topic " + name + " and partitions " + strconv.Itoa(partitions) + " ---") + data.KafkaTopicName = name + data.KafkaTopicPartitions = partitions + KubectlApplyWithTemplate(t, data, "kafkaTopicTemplate", kafkaTopicTemplate) + _, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafkatopic/%s --for=condition=Ready --timeout=480s --namespace %s", name, testNamespace)) + assert.NoErrorf(t, err, "cannot execute command - %s", err) + t.Log("--- kafka topic added ---") +} + +func addCluster(t *testing.T, data templateData) { + t.Log("--- adding kafka cluster ---") + KubectlApplyWithTemplate(t, data, "kafkaClusterTemplate", kafkaClusterTemplate) + _, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafka/%s --for=condition=Ready --timeout=480s --namespace %s", kafkaName, testNamespace)) + assert.NoErrorf(t, err, "cannot execute command - %s", err) + t.Log("--- kafka cluster added ---") +} + +func getTemplateData() (templateData, []Template) { + return templateData{ + TestNamespace: testNamespace, + DeploymentName: deploymentName, + KafkaName: kafkaName, + KafkaClientName: kafkaClientName, + BootstrapServer: bootstrapServer, + TopicName: topic1, + Topic1Name: topic1, + Topic2Name: topic2, + ResetPolicy: "", + ScaledObjectName: scaledObjectName, + }, []Template{ + {Name: "kafkaClientTemplate", Config: kafkaClientTemplate}, + } +} diff --git a/tests/scalers/kafka/kafka_test.go 
b/tests/scalers/kafka/kafka_test.go index 5547bf282a0..b0f929c5d8a 100644 --- a/tests/scalers/kafka/kafka_test.go +++ b/tests/scalers/kafka/kafka_test.go @@ -5,6 +5,7 @@ package kafka_test import ( "fmt" + "strconv" "testing" "github.com/joho/godotenv" @@ -28,7 +29,6 @@ var ( kafkaClientName = fmt.Sprintf("%s-client", testName) scaledObjectName = fmt.Sprintf("%s-so", testName) bootstrapServer = fmt.Sprintf("%s-kafka-bootstrap.%s:9092", kafkaName, testNamespace) - strimziOperatorVersion = "0.30.0" topic1 = "kafka-topic" topic2 = "kafka-topic2" zeroInvalidOffsetTopic = "kafka-topic-zero-invalid-offset" @@ -283,7 +283,7 @@ metadata: namespace: {{.TestNamespace}} spec: kafka: - version: "3.1.0" + version: "3.4.0" replicas: 1 listeners: - name: plain @@ -348,7 +348,7 @@ func TestScaler(t *testing.T) { kc := GetKubernetesClient(t) data, templates := getTemplateData() CreateKubernetesResources(t, kc, testNamespace, data, templates) - installKafkaOperator(t) + defer DeleteKubernetesResources(t, testNamespace, data, templates) addCluster(t, data) addTopic(t, data, topic1, topicPartitions) addTopic(t, data, topic2, topicPartitions) @@ -363,10 +363,6 @@ func TestScaler(t *testing.T) { testZeroOnInvalidOffset(t, kc, data) testOneOnInvalidOffset(t, kc, data) testPersistentLag(t, kc, data) - - // cleanup - uninstallKafkaOperator(t) - DeleteKubernetesResources(t, testNamespace, data, templates) } func testEarliestPolicy(t *testing.T, kc *kubernetes.Clientset, data templateData) { @@ -376,7 +372,9 @@ func testEarliestPolicy(t *testing.T, kc *kubernetes.Clientset, data templateDat data.TopicName = topic1 data.ResetPolicy = "earliest" KubectlApplyWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) + defer KubectlDeleteWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) KubectlApplyWithTemplate(t, data, "singleScaledObjectTemplate", singleScaledObjectTemplate) + defer KubectlDeleteWithTemplate(t, data, "singleScaledObjectTemplate", singleScaledObjectTemplate) // Shouldn't scale pods applying earliest policy AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30) @@ -398,9 +396,6 @@ func testEarliestPolicy(t *testing.T, kc *kubernetes.Clientset, data templateDat assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, topicPartitions, 60, 2), "replica count should be %d after 2 minute", messages) - - KubectlDeleteWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) - KubectlDeleteWithTemplate(t, data, "singleScaledObjectTemplate", singleScaledObjectTemplate) } func testLatestPolicy(t *testing.T, kc *kubernetes.Clientset, data templateData) { @@ -411,7 +406,9 @@ func testLatestPolicy(t *testing.T, kc *kubernetes.Clientset, data templateData) data.TopicName = topic1 data.ResetPolicy = "latest" KubectlApplyWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) + defer KubectlDeleteWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) KubectlApplyWithTemplate(t, data, "singleScaledObjectTemplate", singleScaledObjectTemplate) + defer KubectlDeleteWithTemplate(t, data, "singleScaledObjectTemplate", singleScaledObjectTemplate) // Shouldn't scale pods AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30) @@ -433,19 +430,16 @@ func testLatestPolicy(t *testing.T, kc *kubernetes.Clientset, data templateData) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 
topicPartitions, 60, 2), "replica count should be %d after 2 minute", messages) - - KubectlDeleteWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) - KubectlDeleteWithTemplate(t, data, "singleScaledObjectTemplate", singleScaledObjectTemplate) } func testMultiTopic(t *testing.T, kc *kubernetes.Clientset, data templateData) { t.Log("--- testing multi topic: scale out ---") commitPartition(t, topic1, "multiTopic") commitPartition(t, topic2, "multiTopic") - data.Topic1Name = topic1 - data.Topic2Name = topic2 KubectlApplyWithTemplate(t, data, "multiDeploymentTemplate", multiDeploymentTemplate) + defer KubectlDeleteWithTemplate(t, data, "multiDeploymentTemplate", multiDeploymentTemplate) KubectlApplyWithTemplate(t, data, "multiScaledObjectTemplate", multiScaledObjectTemplate) + defer KubectlDeleteWithTemplate(t, data, "multiScaledObjectTemplate", multiScaledObjectTemplate) // Shouldn't scale pods AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30) @@ -464,9 +458,6 @@ func testMultiTopic(t *testing.T, kc *kubernetes.Clientset, data templateData) { publishMessage(t, topic2) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 2, 60, 2), "replica count should be %d after 2 minute", 2) - - KubectlDeleteWithTemplate(t, data, "multiDeploymentTemplate", multiDeploymentTemplate) - KubectlDeleteWithTemplate(t, data, "multiScaledObjectTemplate", multiScaledObjectTemplate) } func testZeroOnInvalidOffset(t *testing.T, kc *kubernetes.Clientset, data templateData) { @@ -477,13 +468,12 @@ func testZeroOnInvalidOffset(t *testing.T, kc *kubernetes.Clientset, data templa data.ResetPolicy = invalidOffsetGroup data.ScaleToZeroOnInvalid = StringTrue KubectlApplyWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) + defer KubectlDeleteWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) KubectlApplyWithTemplate(t, data, "invalidOffsetScaledObjectTemplate", invalidOffsetScaledObjectTemplate) + defer KubectlDeleteWithTemplate(t, data, "invalidOffsetScaledObjectTemplate", invalidOffsetScaledObjectTemplate) // Shouldn't scale pods AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, 0, 30) - - KubectlDeleteWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) - KubectlDeleteWithTemplate(t, data, "invalidOffsetScaledObjectTemplate", invalidOffsetScaledObjectTemplate) } func testOneOnInvalidOffset(t *testing.T, kc *kubernetes.Clientset, data templateData) { @@ -494,7 +484,9 @@ func testOneOnInvalidOffset(t *testing.T, kc *kubernetes.Clientset, data templat data.ResetPolicy = invalidOffsetGroup data.ScaleToZeroOnInvalid = StringFalse KubectlApplyWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) + defer KubectlDeleteWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) KubectlApplyWithTemplate(t, data, "invalidOffsetScaledObjectTemplate", invalidOffsetScaledObjectTemplate) + defer KubectlDeleteWithTemplate(t, data, "invalidOffsetScaledObjectTemplate", invalidOffsetScaledObjectTemplate) // Should scale to 1 assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 1, 60, 2), @@ -506,9 +498,6 @@ func testOneOnInvalidOffset(t *testing.T, kc *kubernetes.Clientset, data templat // Should scale to 0 assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, 0, 60, 10), "replica count should be %d after 10 minute", 0) - - 
KubectlDeleteWithTemplate(t, data, "singleDeploymentTemplate", singleDeploymentTemplate) - KubectlDeleteWithTemplate(t, data, "invalidOffsetScaledObjectTemplate", invalidOffsetScaledObjectTemplate) } func publishMessage(t *testing.T, topic string) { @@ -564,37 +553,22 @@ func testPersistentLag(t *testing.T, kc *kubernetes.Clientset, data templateData KubectlDeleteWithTemplate(t, data, "persistentLagScaledObjectTemplate", persistentLagScaledObjectTemplate) } -func installKafkaOperator(t *testing.T) { - _, err := ExecuteCommand("helm repo add strimzi https://strimzi.io/charts/") - assert.NoErrorf(t, err, "cannot execute command - %s", err) - _, err = ExecuteCommand("helm repo update") - assert.NoErrorf(t, err, "cannot execute command - %s", err) - _, err = ExecuteCommand(fmt.Sprintf(`helm upgrade --install --namespace %s --wait %s strimzi/strimzi-kafka-operator --version %s`, - testNamespace, - testName, - strimziOperatorVersion)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) -} - -func uninstallKafkaOperator(t *testing.T) { - _, err := ExecuteCommand(fmt.Sprintf(`helm uninstall --namespace %s %s`, - testNamespace, - testName)) - assert.NoErrorf(t, err, "cannot execute command - %s", err) -} - func addTopic(t *testing.T, data templateData, name string, partitions int) { + t.Log("--- adding kafka topic" + name + " and partitions " + strconv.Itoa(partitions) + " ---") data.KafkaTopicName = name data.KafkaTopicPartitions = partitions KubectlApplyWithTemplate(t, data, "kafkaTopicTemplate", kafkaTopicTemplate) _, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafkatopic/%s --for=condition=Ready --timeout=480s --namespace %s", name, testNamespace)) assert.NoErrorf(t, err, "cannot execute command - %s", err) + t.Log("--- kafka topic added ---") } func addCluster(t *testing.T, data templateData) { + t.Log("--- adding kafka cluster ---") KubectlApplyWithTemplate(t, data, "kafkaClusterTemplate", kafkaClusterTemplate) _, err := ExecuteCommand(fmt.Sprintf("kubectl wait kafka/%s --for=condition=Ready --timeout=480s --namespace %s", kafkaName, testNamespace)) assert.NoErrorf(t, err, "cannot execute command - %s", err) + t.Log("--- kafka cluster added ---") } func getTemplateData() (templateData, []Template) { diff --git a/tests/utils/cleanup_test.go b/tests/utils/cleanup_test.go index 87e718bd318..a723c35f2a5 100644 --- a/tests/utils/cleanup_test.go +++ b/tests/utils/cleanup_test.go @@ -89,3 +89,14 @@ func TestRemoveCertManager(t *testing.T) { func TestRemoveAzureManagedPrometheusComponents(t *testing.T) { KubectlDeleteWithTemplate(t, helper.EmptyTemplateData{}, "azureManagedPrometheusConfigMapTemplate", helper.AzureManagedPrometheusConfigMapTemplate) } + +func TestRemoveStrimzi(t *testing.T) { + _, err := ExecuteCommand(fmt.Sprintf(`helm uninstall --namespace %s %s`, + StrimziNamespace, + StrimziChartName)) + require.NoErrorf(t, err, "cannot uninstall strimzi - %s", err) + + KubeClient = GetKubernetesClient(t) + + DeleteNamespace(t, StrimziNamespace) +} diff --git a/tests/utils/setup_test.go b/tests/utils/setup_test.go index abcbafc4c66..33809cdc440 100644 --- a/tests/utils/setup_test.go +++ b/tests/utils/setup_test.go @@ -230,3 +230,23 @@ func TestSetupAadPodIdentityComponents(t *testing.T) { AzureAdPodIdentityNamespace, AzureADMsiClientID, AzureADMsiID)) require.NoErrorf(t, err, "cannot install aad pod identity webhook - %s", err) } + +func TestSetUpStrimzi(t *testing.T) { + t.Log("--- installing kafka operator ---") + _, err := ExecuteCommand("helm repo add strimzi 
https://strimzi.io/charts/") + assert.NoErrorf(t, err, "cannot execute command - %s", err) + _, err = ExecuteCommand("helm repo update") + assert.NoErrorf(t, err, "cannot execute command - %s", err) + + KubeClient = GetKubernetesClient(t) + + CreateNamespace(t, KubeClient, StrimziNamespace) + + _, err = ExecuteCommand(fmt.Sprintf(`helm upgrade --install --namespace %s --wait %s strimzi/strimzi-kafka-operator --version %s --set watchAnyNamespace=true`, + StrimziNamespace, + StrimziChartName, + StrimziVersion)) + assert.NoErrorf(t, err, "cannot execute command - %s", err) + + t.Log("--- kafka operator installed ---") +} diff --git a/vendor/github.com/klauspost/compress/s2/.gitignore b/vendor/github.com/klauspost/compress/s2/.gitignore new file mode 100644 index 00000000000..3a89c6e3e26 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/.gitignore @@ -0,0 +1,15 @@ +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/klauspost/compress/s2/LICENSE b/vendor/github.com/klauspost/compress/s2/LICENSE new file mode 100644 index 00000000000..1d2d645bd93 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/s2/README.md b/vendor/github.com/klauspost/compress/s2/README.md new file mode 100644 index 00000000000..8284bb0810c --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/README.md @@ -0,0 +1,1120 @@ +# S2 Compression + +S2 is an extension of [Snappy](https://github.com/google/snappy). 
+ +S2 is aimed for high throughput, which is why it features concurrent compression for bigger payloads. + +Decoding is compatible with Snappy compressed content, but content compressed with S2 cannot be decompressed by Snappy. +This means that S2 can seamlessly replace Snappy without converting compressed content. + +S2 can produce Snappy compatible output, faster and better than Snappy. +If you want full benefit of the changes you should use s2 without Snappy compatibility. + +S2 is designed to have high throughput on content that cannot be compressed. +This is important, so you don't have to worry about spending CPU cycles on already compressed data. + +## Benefits over Snappy + +* Better compression +* Adjustable compression (3 levels) +* Concurrent stream compression +* Faster decompression, even for Snappy compatible content +* Concurrent Snappy/S2 stream decompression +* Skip forward in compressed stream +* Random seeking with indexes +* Compatible with reading Snappy compressed content +* Smaller block size overhead on incompressible blocks +* Block concatenation +* Block Dictionary support +* Uncompressed stream mode +* Automatic stream size padding +* Snappy compatible block compression + +## Drawbacks over Snappy + +* Not optimized for 32 bit systems +* Streams use slightly more memory due to larger blocks and concurrency (configurable) + +# Usage + +Installation: `go get -u github.com/klauspost/compress/s2` + +Full package documentation: + +[![godoc][1]][2] + +[1]: https://godoc.org/github.com/klauspost/compress?status.svg +[2]: https://godoc.org/github.com/klauspost/compress/s2 + +## Compression + +```Go +func EncodeStream(src io.Reader, dst io.Writer) error { + enc := s2.NewWriter(dst) + _, err := io.Copy(enc, src) + if err != nil { + enc.Close() + return err + } + // Blocks until compression is done. + return enc.Close() +} +``` + +You should always call `enc.Close()`, otherwise you will leak resources and your encode will be incomplete. + +For the best throughput, you should attempt to reuse the `Writer` using the `Reset()` method. + +The Writer in S2 is always buffered, therefore `NewBufferedWriter` in Snappy can be replaced with `NewWriter` in S2. +It is possible to flush any buffered data using the `Flush()` method. +This will block until all data sent to the encoder has been written to the output. + +S2 also supports the `io.ReaderFrom` interface, which will consume all input from a reader. + +As a final method to compress data, if you have a single block of data you would like to have encoded as a stream, +a slightly more efficient method is to use the `EncodeBuffer` method. +This will take ownership of the buffer until the stream is closed. + +```Go +func EncodeStream(src []byte, dst io.Writer) error { + enc := s2.NewWriter(dst) + // The encoder owns the buffer until Flush or Close is called. + err := enc.EncodeBuffer(buf) + if err != nil { + enc.Close() + return err + } + // Blocks until compression is done. + return enc.Close() +} +``` + +Each call to `EncodeBuffer` will result in discrete blocks being created without buffering, +so it should only be used a single time per stream. +If you need to write several blocks, you should use the regular io.Writer interface. + + +## Decompression + +```Go +func DecodeStream(src io.Reader, dst io.Writer) error { + dec := s2.NewReader(src) + _, err := io.Copy(dst, dec) + return err +} +``` + +Similar to the Writer, a Reader can be reused using the `Reset` method. 
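+For example, a minimal reuse sketch (not part of the upstream README; it
+assumes the `Reset` methods documented by the package, which take the next
+source or destination):
+
+```Go
+func DecodeAll(srcs []io.Reader, dst io.Writer) error {
+	// Reuse one Reader, and therefore its internal buffers, across streams.
+	dec := s2.NewReader(nil)
+	for _, src := range srcs {
+		dec.Reset(src)
+		if _, err := io.Copy(dst, dec); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+```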
+ +For the best possible throughput, there is a `EncodeBuffer(buf []byte)` function available. +However, it requires that the provided buffer isn't used after it is handed over to S2 and until the stream is flushed or closed. + +For smaller data blocks, there is also a non-streaming interface: `Encode()`, `EncodeBetter()` and `Decode()`. +Do however note that these functions (similar to Snappy) does not provide validation of data, +so data corruption may be undetected. Stream encoding provides CRC checks of data. + +It is possible to efficiently skip forward in a compressed stream using the `Skip()` method. +For big skips the decompressor is able to skip blocks without decompressing them. + +## Single Blocks + +Similar to Snappy S2 offers single block compression. +Blocks do not offer the same flexibility and safety as streams, +but may be preferable for very small payloads, less than 100K. + +Using a simple `dst := s2.Encode(nil, src)` will compress `src` and return the compressed result. +It is possible to provide a destination buffer. +If the buffer has a capacity of `s2.MaxEncodedLen(len(src))` it will be used. +If not a new will be allocated. + +Alternatively `EncodeBetter`/`EncodeBest` can also be used for better, but slightly slower compression. + +Similarly to decompress a block you can use `dst, err := s2.Decode(nil, src)`. +Again an optional destination buffer can be supplied. +The `s2.DecodedLen(src)` can be used to get the minimum capacity needed. +If that is not satisfied a new buffer will be allocated. + +Block function always operate on a single goroutine since it should only be used for small payloads. + +# Commandline tools + +Some very simply commandline tools are provided; `s2c` for compression and `s2d` for decompression. + +Binaries can be downloaded on the [Releases Page](https://github.com/klauspost/compress/releases). + +Installing then requires Go to be installed. To install them, use: + +`go install github.com/klauspost/compress/s2/cmd/s2c@latest && go install github.com/klauspost/compress/s2/cmd/s2d@latest` + +To build binaries to the current folder use: + +`go build github.com/klauspost/compress/s2/cmd/s2c && go build github.com/klauspost/compress/s2/cmd/s2d` + + +## s2c + +``` +Usage: s2c [options] file1 file2 + +Compresses all files supplied as input separately. +Output files are written as 'filename.ext.s2' or 'filename.ext.snappy'. +By default output files will be overwritten. +Use - as the only file name to read from stdin and write to stdout. + +Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt +Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt + +File names beginning with 'http://' and 'https://' will be downloaded and compressed. +Only http response code 200 is accepted. + +Options: + -bench int + Run benchmark n times. No output will be written + -blocksize string + Max block size. Examples: 64K, 256K, 1M, 4M. Must be power of two and <= 4MB (default "4M") + -c Write all output to stdout. Multiple input files will be concatenated + -cpu int + Compress using this amount of threads (default 32) + -faster + Compress faster, but with a minor compression loss + -help + Display help + -index + Add seek index (default true) + -o string + Write output to another file. 
Single input file only + -pad string + Pad size to a multiple of this value, Examples: 500, 64K, 256K, 1M, 4M, etc (default "1") + -q Don't write any output to terminal, except errors + -rm + Delete source file(s) after successful compression + -safe + Do not overwrite output files + -slower + Compress more, but a lot slower + -snappy + Generate Snappy compatible output stream + -verify + Verify written files + +``` + +## s2d + +``` +Usage: s2d [options] file1 file2 + +Decompresses all files supplied as input. Input files must end with '.s2' or '.snappy'. +Output file names have the extension removed. By default output files will be overwritten. +Use - as the only file name to read from stdin and write to stdout. + +Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt +Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt + +File names beginning with 'http://' and 'https://' will be downloaded and decompressed. +Extensions on downloaded files are ignored. Only http response code 200 is accepted. + +Options: + -bench int + Run benchmark n times. No output will be written + -c Write all output to stdout. Multiple input files will be concatenated + -help + Display help + -o string + Write output to another file. Single input file only + -offset string + Start at offset. Examples: 92, 64K, 256K, 1M, 4M. Requires Index + -q Don't write any output to terminal, except errors + -rm + Delete source file(s) after successful decompression + -safe + Do not overwrite output files + -tail string + Return last of compressed file. Examples: 92, 64K, 256K, 1M, 4M. Requires Index + -verify + Verify files, but do not write output +``` + +## s2sx: self-extracting archives + +s2sx allows creating self-extracting archives with no dependencies. + +By default, executables are created for the same platforms as the host os, +but this can be overridden with `-os` and `-arch` parameters. + +Extracted files have 0666 permissions, except when untar option used. + +``` +Usage: s2sx [options] file1 file2 + +Compresses all files supplied as input separately. +If files have '.s2' extension they are assumed to be compressed already. +Output files are written as 'filename.s2sx' and with '.exe' for windows targets. +If output is big, an additional file with ".more" is written. This must be included as well. +By default output files will be overwritten. + +Wildcards are accepted: testdir/*.txt will compress all files in testdir ending with .txt +Directories can be wildcards as well. testdir/*/*.txt will match testdir/subdir/b.txt + +Options: + -arch string + Destination architecture (default "amd64") + -c Write all output to stdout. Multiple input files will be concatenated + -cpu int + Compress using this amount of threads (default 32) + -help + Display help + -max string + Maximum executable size. Rest will be written to another file. (default "1G") + -os string + Destination operating system (default "windows") + -q Don't write any output to terminal, except errors + -rm + Delete source file(s) after successful compression + -safe + Do not overwrite output files + -untar + Untar on destination +``` + +Available platforms are: + + * darwin-amd64 + * darwin-arm64 + * linux-amd64 + * linux-arm + * linux-arm64 + * linux-mips64 + * linux-ppc64le + * windows-386 + * windows-amd64 + +By default, there is a size limit of 1GB for the output executable. + +When this is exceeded the remaining file content is written to a file called +output+`.more`. 
This file must be included for a successful extraction and +placed alongside the executable for a successful extraction. + +This file *must* have the same name as the executable, so if the executable is renamed, +so must the `.more` file. + +This functionality is disabled with stdin/stdout. + +### Self-extracting TAR files + +If you wrap a TAR file you can specify `-untar` to make it untar on the destination host. + +Files are extracted to the current folder with the path specified in the tar file. + +Note that tar files are not validated before they are wrapped. + +For security reasons files that move below the root folder are not allowed. + +# Performance + +This section will focus on comparisons to Snappy. +This package is solely aimed at replacing Snappy as a high speed compression package. +If you are mainly looking for better compression [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) +gives better compression, but typically at speeds slightly below "better" mode in this package. + +Compression is increased compared to Snappy, mostly around 5-20% and the throughput is typically 25-40% increased (single threaded) compared to the Snappy Go implementation. + +Streams are concurrently compressed. The stream will be distributed among all available CPU cores for the best possible throughput. + +A "better" compression mode is also available. This allows to trade a bit of speed for a minor compression gain. +The content compressed in this mode is fully compatible with the standard decoder. + +Snappy vs S2 **compression** speed on 16 core (32 thread) computer, using all threads and a single thread (1 CPU): + +| File | S2 Speed | S2 Throughput | S2 % smaller | S2 "better" | "better" throughput | "better" % smaller | +|---------------------------------------------------------------------------------------------------------|----------|---------------|--------------|-------------|---------------------|--------------------| +| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 16.33x | 10556 MB/s | 8.0% | 6.04x | 5252 MB/s | 14.7% | +| (1 CPU) | 1.08x | 940 MB/s | - | 0.46x | 400 MB/s | - | +| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 16.51x | 15224 MB/s | 31.70% | 9.47x | 8734 MB/s | 37.71% | +| (1 CPU) | 1.26x | 1157 MB/s | - | 0.60x | 556 MB/s | - | +| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 15.14x | 12598 MB/s | -5.76% | 6.23x | 5675 MB/s | 3.62% | +| (1 CPU) | 1.02x | 932 MB/s | - | 0.47x | 432 MB/s | - | +| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 11.21x | 12116 MB/s | 15.95% | 3.24x | 3500 MB/s | 18.00% | +| (1 CPU) | 1.05x | 1135 MB/s | - | 0.27x | 292 MB/s | - | +| [apache.log](https://files.klauspost.com/compress/apache.log.zst) | 8.55x | 16673 MB/s | 20.54% | 5.85x | 11420 MB/s | 24.97% | +| (1 CPU) | 1.91x | 1771 MB/s | - | 0.53x | 1041 MB/s | - | +| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 15.76x | 14357 MB/s | 24.01% | 8.67x | 7891 MB/s | 33.68% | +| (1 CPU) | 1.17x | 1064 MB/s | - | 0.65x | 595 MB/s | - | +| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 13.33x | 9835 MB/s | 2.34% | 6.85x | 4863 MB/s | 9.96% | +| (1 CPU) | 0.97x | 689 MB/s | - | 0.55x | 387 MB/s | - | +| sharnd.out.2gb | 9.11x | 13213 MB/s | 0.01% | 1.49x | 9184 MB/s | 0.01% | +| (1 CPU) | 0.88x | 5418 MB/s | - | 0.77x | 5417 MB/s | - | +| [sofia-air-quality-dataset 
csv](https://files.klauspost.com/compress/sofia-air-quality-dataset.tar.zst) | 22.00x | 11477 MB/s | 18.73% | 11.15x | 5817 MB/s | 27.88% |
+| (1 CPU) | 1.23x | 642 MB/s | - | 0.71x | 642 MB/s | - |
+| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 11.23x | 6520 MB/s | 5.9% | 5.35x | 3109 MB/s | 15.88% |
+| (1 CPU) | 1.05x | 607 MB/s | - | 0.52x | 304 MB/s | - |
+| [enwik9](https://files.klauspost.com/compress/enwik9.zst) | 19.28x | 8440 MB/s | 4.04% | 9.31x | 4076 MB/s | 18.04% |
+| (1 CPU) | 1.12x | 488 MB/s | - | 0.57x | 250 MB/s | - |
+
+### Legend
+
+* `S2 Speed`: Speed of S2 compared to Snappy, using 16 cores and 1 core.
+* `S2 Throughput`: Throughput of S2 in MB/s.
+* `S2 % smaller`: How much smaller the S2 output is than the Snappy output, in percent.
+* `S2 "better"`: Speed of S2 "better" mode compared to Snappy.
+* `"better" throughput`: Throughput of S2 "better" mode in MB/s.
+* `"better" % smaller`: How much smaller the S2 "better" output is than the Snappy output, in percent.
+
+There is a good speedup across the board when using a single thread and a significant speedup when using multiple threads.
+
+Machine generated data gets by far the biggest compression boost, with size being reduced by up to 35% compared to Snappy.
+
+The "better" compression mode sees a good improvement in all cases, but usually at a performance cost.
+
+Incompressible content (`sharnd.out.2gb`, 2GB random data) sees the smallest speedup.
+This is likely dominated by synchronization overhead, which is confirmed by the fact that single-threaded performance is higher (see above).
+
+## Decompression
+
+S2 attempts to create content that is also fast to decompress, except in "better" mode, where the smallest representation is used.
+
+S2 vs Snappy **decompression** speed. Both operating on a single core:
+
+| File | S2 Throughput | vs. Snappy | Better Throughput | vs. Snappy |
+|-----------------------------------------------------------------------------------------------------|---------------|------------|-------------------|------------|
+| [rawstudio-mint14.tar](https://files.klauspost.com/compress/rawstudio-mint14.7z) | 2117 MB/s | 1.14x | 1738 MB/s | 0.94x |
+| [github-june-2days-2019.json](https://files.klauspost.com/compress/github-june-2days-2019.json.zst) | 2401 MB/s | 1.25x | 2307 MB/s | 1.20x |
+| [github-ranks-backup.bin](https://files.klauspost.com/compress/github-ranks-backup.bin.zst) | 2075 MB/s | 0.98x | 1764 MB/s | 0.83x |
+| [consensus.db.10gb](https://files.klauspost.com/compress/consensus.db.10gb.zst) | 2967 MB/s | 1.05x | 2885 MB/s | 1.02x |
+| [adresser.json](https://files.klauspost.com/compress/adresser.json.zst) | 4141 MB/s | 1.07x | 4184 MB/s | 1.08x |
+| [gob-stream](https://files.klauspost.com/compress/gob-stream.7z) | 2264 MB/s | 1.12x | 2185 MB/s | 1.08x |
+| [10gb.tar](http://mattmahoney.net/dc/10gb.html) | 1525 MB/s | 1.03x | 1347 MB/s | 0.91x |
+| sharnd.out.2gb | 3813 MB/s | 0.79x | 3900 MB/s | 0.81x |
+| [enwik9](http://mattmahoney.net/dc/textdata.html) | 1246 MB/s | 1.29x | 967 MB/s | 1.00x |
+| [silesia.tar](http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip) | 1433 MB/s | 1.12x | 1203 MB/s | 0.94x |
+| [enwik10](https://encode.su/threads/3315-enwik10-benchmark-results) | 1284 MB/s | 1.32x | 1010 MB/s | 1.04x |
+
+### Legend
+
+* `S2 Throughput`: Decompression speed of S2 encoded content.
+* `Better Throughput`: Decompression speed of S2 "better" encoded content.
+* `vs. Snappy`: Decompression speed relative to Snappy, for regular and "better" encoded content respectively.
+
+
+While the decompression code hasn't changed, there is a significant speedup in decompression speed.
+S2 prefers longer matches and will typically only find matches that are 6 bytes or longer.
+While this reduces compression a bit, it improves decompression speed.
+
+The "better" compression mode will actively look for shorter matches, which is why it has a decompression speed quite similar to Snappy.
+
+Decompression is also very fast without assembly. Single goroutine decompression speed, no assembly:
+
+| File                           | vs. Snappy | S2 throughput |
+|--------------------------------|------------|---------------|
+| consensus.db.10gb.s2           | 1.84x      | 2289.8 MB/s   |
+| 10gb.tar.s2                    | 1.30x      | 867.07 MB/s   |
+| rawstudio-mint14.tar.s2        | 1.66x      | 1329.65 MB/s  |
+| github-june-2days-2019.json.s2 | 2.36x      | 1831.59 MB/s  |
+| github-ranks-backup.bin.s2     | 1.73x      | 1390.7 MB/s   |
+| enwik9.s2                      | 1.67x      | 681.53 MB/s   |
+| adresser.json.s2               | 3.41x      | 4230.53 MB/s  |
+| silesia.tar.s2                 | 1.52x      | 811.58 MB/s   |
+
+Even though S2 typically compresses better than Snappy, decompression speed is always better.
+
+### Concurrent Stream Decompression
+
+For full stream decompression, S2 offers [DecodeConcurrent](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.DecodeConcurrent),
+which will decode a full stream using multiple goroutines.
+
+Example scaling, AMD Ryzen 3950X, 16 cores, decompression using `s2d -bench=3`, best of 3:
+
+| Input                                     | `-cpu=1`   | `-cpu=2`   | `-cpu=4`   | `-cpu=8`   | `-cpu=16`   |
+|-------------------------------------------|------------|------------|------------|------------|-------------|
+| enwik10.snappy                            | 1098.6MB/s | 1819.8MB/s | 3625.6MB/s | 6910.6MB/s | 10818.2MB/s |
+| enwik10.s2                                | 1303.5MB/s | 2606.1MB/s | 4847.9MB/s | 8878.4MB/s | 9592.1MB/s  |
+| sofia-air-quality-dataset.tar.snappy      | 1302.0MB/s | 2165.0MB/s | 4244.5MB/s | 8241.0MB/s | 12920.5MB/s |
+| sofia-air-quality-dataset.tar.s2          | 1399.2MB/s | 2463.2MB/s | 5196.5MB/s | 9639.8MB/s | 11439.5MB/s |
+| sofia-air-quality-dataset.tar.s2 (no asm) | 837.5MB/s  | 1652.6MB/s | 3183.6MB/s | 5945.0MB/s | 9620.7MB/s  |
+
+Scaling can be expected to be pretty linear until memory bandwidth is saturated.
+
+For now, `DecodeConcurrent` can only be used for full streams, without seeking or combining with regular reads.
+
+## Block compression
+
+
+When compressing blocks, no concurrent compression is performed, just as with Snappy.
+This is because blocks are for smaller payloads and generally will not benefit from concurrent compression.
+
+An important change is that incompressible blocks will be at most 10 bytes bigger than the input.
+In rare, worst-case scenarios, Snappy blocks could be significantly bigger than the input.
+
+### Mixed content blocks
+
+The most reliable comparison uses a wide dataset.
+For this we use [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
+53927 files, total input size: 4,014,735,833 bytes. Single goroutine used.
+
+| *                 | Input      | Output     | Reduction  | MB/s       |
+|-------------------|------------|------------|------------|------------|
+| S2                | 4014735833 | 1059723369 | 73.60%     | **936.73** |
+| S2 Better         | 4014735833 | 961580539  | 76.05%     | 451.10     |
+| S2 Best           | 4014735833 | 899182886  | **77.60%** | 46.84      |
+| Snappy            | 4014735833 | 1128706759 | 71.89%     | 790.15     |
+| S2, Snappy Output | 4014735833 | 1093823291 | 72.75%     | 936.60     |
+| LZ4               | 4014735833 | 1063768713 | 73.50%     | 452.02     |
+
+S2 delivers both the best single-threaded throughput (regular mode) and the best compression ratio ("best" mode).
+"Better" mode provides the same compression speed as LZ4 with a better compression ratio.
+
+When producing Snappy compatible output, S2 still delivers better throughput (about 150 MB/s more) and better compression.
+
+As can be seen from the other benchmarks, decompression should also be easier on the S2 generated output.
+
+Though they cannot be compared directly due to different decompression speeds, here are the speed/size comparisons for
+other Go compressors:
+
+| *                 | Input      | Output     | Reduction | MB/s   |
+|-------------------|------------|------------|-----------|--------|
+| Zstd Fastest (Go) | 4014735833 | 794608518  | 80.21%    | 236.04 |
+| Zstd Best (Go)    | 4014735833 | 704603356  | 82.45%    | 35.63  |
+| Deflate (Go) l1   | 4014735833 | 871294239  | 78.30%    | 214.04 |
+| Deflate (Go) l9   | 4014735833 | 730389060  | 81.81%    | 41.17  |
+
+### Standard block compression
+
+Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
+So individual benchmarks should only be seen as a guideline, and the overall picture is more important.
+
+These micro-benchmarks are with data in cache and trained branch predictors. For a more realistic benchmark, see the mixed content results above.
+
+Block compression. Parallel benchmark running on 16 cores, 16 goroutines.
+
+AMD64 assembly is used for both S2 and Snappy.
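+For reference, these benchmarks exercise the one-shot block API. A minimal sketch of a compress/decompress
+roundtrip with it (illustrative only; production code would typically reuse destination buffers):
+
+```Go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress/s2"
+)
+
+func main() {
+	src := bytes.Repeat([]byte("an easily compressible payload. "), 100)
+
+	// Compress a single block. Passing nil lets s2 allocate the destination.
+	encoded := s2.Encode(nil, src)
+
+	// Decompress it again. Decode returns an error on corrupt input.
+	decoded, err := s2.Decode(nil, encoded)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%d -> %d bytes, roundtrip ok: %v\n", len(src), len(encoded), bytes.Equal(src, decoded))
+}
+```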
+
+| Absolute Perf         | Snappy size | S2 Size | Snappy Speed | S2 Speed    | Snappy dec  | S2 dec      |
+|-----------------------|-------------|---------|--------------|-------------|-------------|-------------|
+| html                  | 22843       | 20868   | 16246 MB/s   | 18617 MB/s  | 40972 MB/s  | 49263 MB/s  |
+| urls.10K              | 335492      | 286541  | 7943 MB/s    | 10201 MB/s  | 22523 MB/s  | 26484 MB/s  |
+| fireworks.jpeg        | 123034      | 123100  | 349544 MB/s  | 303228 MB/s | 718321 MB/s | 827552 MB/s |
+| fireworks.jpeg (200B) | 146         | 155     | 8869 MB/s    | 20180 MB/s  | 33691 MB/s  | 52421 MB/s  |
+| paper-100k.pdf        | 85304       | 84202   | 167546 MB/s  | 112988 MB/s | 326905 MB/s | 291944 MB/s |
+| html_x_4              | 92234       | 20870   | 15194 MB/s   | 54457 MB/s  | 30843 MB/s  | 32217 MB/s  |
+| alice29.txt           | 88034       | 85934   | 5936 MB/s    | 6540 MB/s   | 12882 MB/s  | 20044 MB/s  |
+| asyoulik.txt          | 77503       | 79575   | 5517 MB/s    | 6657 MB/s   | 12735 MB/s  | 22806 MB/s  |
+| lcet10.txt            | 234661      | 220383  | 6235 MB/s    | 6303 MB/s   | 14519 MB/s  | 18697 MB/s  |
+| plrabn12.txt          | 319267      | 318196  | 5159 MB/s    | 6074 MB/s   | 11923 MB/s  | 19901 MB/s  |
+| geo.protodata         | 23335       | 18606   | 21220 MB/s   | 25432 MB/s  | 56271 MB/s  | 62540 MB/s  |
+| kppkn.gtb             | 69526       | 65019   | 9732 MB/s    | 8905 MB/s   | 18491 MB/s  | 18969 MB/s  |
+| alice29.txt (128B)    | 80          | 82      | 6691 MB/s    | 17179 MB/s  | 31883 MB/s  | 38874 MB/s  |
+| alice29.txt (1000B)   | 774         | 774     | 12204 MB/s   | 13273 MB/s  | 48056 MB/s  | 52341 MB/s  |
+| alice29.txt (10000B)  | 6648        | 6933    | 10044 MB/s   | 12824 MB/s  | 32378 MB/s  | 46322 MB/s  |
+| alice29.txt (20000B)  | 12686       | 13516   | 7733 MB/s    | 12160 MB/s  | 30566 MB/s  | 58969 MB/s  |
+
+
+Speed is generally at or above Snappy. Small blocks get a significant speedup, although at the expense of size.
+
+Decompression speed is better than Snappy, except in one case.
+
+Since payloads are very small, the variance in size is rather big, so the results should only be seen as a general guideline.
+
+Size is on average around Snappy, but varies with content type.
+In cases where compression is worse, it is usually compensated for by a speed boost.
+
+
+### Better compression
+
+Benchmarking single block performance is subject to a lot more variation since it only tests a limited number of file patterns.
+So individual benchmarks should only be seen as a guideline, and the overall picture is more important.
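+"Better" blocks are produced with `s2.EncodeBetter`, and "best" blocks with `s2.EncodeBest`; decoding is unchanged.
+A minimal sketch, reusing `src` from the sketch above:
+
+```Go
+	// Same one-shot API; only the encoder call changes.
+	encoded := s2.EncodeBetter(nil, src)    // slower encode, smaller output
+	decoded, err := s2.Decode(nil, encoded) // the regular decoder handles all modes
+```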
+
+| Absolute Perf         | Snappy size | Better Size | Snappy Speed | Better Speed | Snappy dec  | Better dec  |
+|-----------------------|-------------|-------------|--------------|--------------|-------------|-------------|
+| html                  | 22843       | 18972       | 16246 MB/s   | 8621 MB/s    | 40972 MB/s  | 40292 MB/s  |
+| urls.10K              | 335492      | 248079      | 7943 MB/s    | 5104 MB/s    | 22523 MB/s  | 20981 MB/s  |
+| fireworks.jpeg        | 123034      | 123100      | 349544 MB/s  | 84429 MB/s   | 718321 MB/s | 823698 MB/s |
+| fireworks.jpeg (200B) | 146         | 149         | 8869 MB/s    | 7125 MB/s    | 33691 MB/s  | 30101 MB/s  |
+| paper-100k.pdf        | 85304       | 82887       | 167546 MB/s  | 11087 MB/s   | 326905 MB/s | 198869 MB/s |
+| html_x_4              | 92234       | 18982       | 15194 MB/s   | 29316 MB/s   | 30843 MB/s  | 30937 MB/s  |
+| alice29.txt           | 88034       | 71611       | 5936 MB/s    | 3709 MB/s    | 12882 MB/s  | 16611 MB/s  |
+| asyoulik.txt          | 77503       | 65941       | 5517 MB/s    | 3380 MB/s    | 12735 MB/s  | 14975 MB/s  |
+| lcet10.txt            | 234661      | 184939      | 6235 MB/s    | 3537 MB/s    | 14519 MB/s  | 16634 MB/s  |
+| plrabn12.txt          | 319267      | 264990      | 5159 MB/s    | 2960 MB/s    | 11923 MB/s  | 13382 MB/s  |
+| geo.protodata         | 23335       | 17689       | 21220 MB/s   | 10859 MB/s   | 56271 MB/s  | 57961 MB/s  |
+| kppkn.gtb             | 69526       | 55398       | 9732 MB/s    | 5206 MB/s    | 18491 MB/s  | 16524 MB/s  |
+| alice29.txt (128B)    | 80          | 78          | 6691 MB/s    | 7422 MB/s    | 31883 MB/s  | 34225 MB/s  |
+| alice29.txt (1000B)   | 774         | 746         | 12204 MB/s   | 5734 MB/s    | 48056 MB/s  | 42068 MB/s  |
+| alice29.txt (10000B)  | 6648        | 6218        | 10044 MB/s   | 6055 MB/s    | 32378 MB/s  | 28813 MB/s  |
+| alice29.txt (20000B)  | 12686       | 11492       | 7733 MB/s    | 3143 MB/s    | 30566 MB/s  | 27315 MB/s  |
+
+
+Except for the mostly incompressible JPEG image, compression is better, usually by double digits in terms of percentage reduction over Snappy.
+
+The PDF sample shows a significant slowdown compared to Snappy, as this mode tries harder
+to compress the data. Very small blocks are also not favorable for better compression, so throughput is way down.
+
+This mode aims to provide better compression at the expense of performance and achieves that
+without a huge performance penalty, except on very small blocks.
+
+Decompression speed suffers a little compared to the regular S2 mode,
+but still manages to be close to Snappy in spite of increased compression.
+
+# Best compression mode
+
+S2 offers a "best" compression mode.
+
+This will compress as much as possible with little regard to CPU usage.
+
+It is mainly intended for offline compression, where decompression speed should still
+be high and the output compatible with other S2 compressed data.
+
+Some examples compared on a 16 core CPU, amd64 assembly used:
+
+```
+* enwik10
+Default... 10000000000 -> 4759950115 [47.60%]; 1.03s, 9263.0MB/s
+Better... 10000000000 -> 4084706676 [40.85%]; 2.16s, 4415.4MB/s
+Best... 10000000000 -> 3615520079 [36.16%]; 42.259s, 225.7MB/s
+
+* github-june-2days-2019.json
+Default... 6273951764 -> 1041700255 [16.60%]; 431ms, 13882.3MB/s
+Better... 6273951764 -> 945841238 [15.08%]; 547ms, 10938.4MB/s
+Best... 6273951764 -> 826392576 [13.17%]; 9.455s, 632.8MB/s
+
+* nyc-taxi-data-10M.csv
+Default... 3325605752 -> 1093516949 [32.88%]; 324ms, 9788.7MB/s
+Better... 3325605752 -> 885394158 [26.62%]; 491ms, 6459.4MB/s
+Best... 3325605752 -> 773681257 [23.26%]; 8.29s, 412.0MB/s
+
+* 10gb.tar
+Default... 10065157632 -> 5915541066 [58.77%]; 1.028s, 9337.4MB/s
+Better... 10065157632 -> 5453844650 [54.19%]; 1.597s, 4862.7MB/s
+Best... 10065157632 -> 5192495021 [51.59%]; 32.78s, 308.2MB/s
+
+* consensus.db.10gb
+Default...
10737418240 -> 4549762344 [42.37%]; 882ms, 12118.4MB/s
+Better... 10737418240 -> 4438535064 [41.34%]; 1.533s, 3500.9MB/s
+Best... 10737418240 -> 4210602774 [39.21%]; 42.96s, 254.4MB/s
+```
+
+Decompression speed should be around the same as using the 'better' compression mode.
+
+## Dictionaries
+
+*Note: S2 dictionary compression is currently at an early implementation stage, with no assembly for
+either encoding or decoding. Performance improvements can be expected in the future.*
+
+Adding dictionaries allows providing a custom dictionary that will serve as a lookup table at the beginning of blocks.
+
+The same dictionary *must* be used for both encoding and decoding.
+S2 does not keep track of whether the same dictionary is used,
+and using the wrong dictionary will most often not result in an error when decompressing.
+
+Blocks encoded *without* dictionaries can be decompressed seamlessly *with* a dictionary.
+This means it is possible to switch from an encoding without dictionaries to an encoding with dictionaries
+and treat the blocks similarly.
+
+Similar to [zStandard dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression),
+the same usage scenario applies to S2 dictionaries.
+
+> Training works if there is some correlation in a family of small data samples. The more data-specific a dictionary is, the more efficient it is (there is no universal dictionary). Hence, deploying one dictionary per type of data will provide the greatest benefits. Dictionary gains are mostly effective in the first few KB. Then, the compression algorithm will gradually use previously decoded content to better compress the rest of the file.
+
+S2 further limits the dictionary to only be enabled on the first 64KB of a block.
+This will remove any negative (speed) impacts of the dictionaries on bigger blocks.
+
+### Compression
+
+Using the [github_users_sample_set](https://github.com/facebook/zstd/releases/download/v1.1.3/github_users_sample_set.tar.zst)
+and a 64KB dictionary trained with zStandard, the following sizes can be achieved.
+
+|                    | Default          | Better           | Best             |
+|--------------------|------------------|------------------|------------------|
+| Without Dictionary | 3362023 (44.92%) | 3083163 (41.19%) | 3057944 (40.86%) |
+| With Dictionary    | 921524 (12.31%)  | 873154 (11.67%)  | 785503 (10.49%)  |
+
+So for highly repetitive content, this case provides an almost 3x reduction in size.
+
+For less uniform data, we will use the Go source code tree.
+Compressing the first 64KB of all `.go` files in `go/src`, Go 1.19.5, 8912 files, 51253563 bytes input:
+
+|                    | Default           | Better            | Best              |
+|--------------------|-------------------|-------------------|-------------------|
+| Without Dictionary | 22955767 (44.79%) | 20189613 (39.39%) | 19482828 (38.01%) |
+| With Dictionary    | 19654568 (38.35%) | 16289357 (31.78%) | 15184589 (29.63%) |
+| Saving/file        | 362 bytes         | 428 bytes         | 472 bytes         |
+
+
+### Creating Dictionaries
+
+There are no tools to create dictionaries in S2.
+However, there are multiple ways to create a useful dictionary:
+
+#### Using a Sample File
+
+If your input is very uniform, you can just use a sample file as the dictionary.
+
+For example, in the `github_users_sample_set` above, the average compressed size only goes up from
+10.49% to 11.48% when using the first file as the dictionary instead of a dedicated dictionary.
+
+```Go
+	// Read a sample
+	sample, err := os.ReadFile("sample.json")
+
+	// Create a dictionary.
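+	// The second argument can mark a common starting sequence of the input (see below); nil omits it.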
+	dict := s2.MakeDict(sample, nil)
+
+	// b := dict.Bytes() will provide a dictionary that can be saved
+	// and reloaded with s2.NewDict(b).
+
+	// To encode:
+	encoded := dict.Encode(nil, file)
+
+	// To decode:
+	decoded, err := dict.Decode(nil, file)
+```
+
+#### Using Zstandard
+
+Zstandard dictionaries can easily be converted to S2 dictionaries.
+
+This can be helpful for generating dictionaries for files that don't have a fixed structure.
+
+
+Example, with training set files placed in `./training-set`:
+
+`λ zstd -r --train-fastcover training-set/* --maxdict=65536 -o name.dict`
+
+This will create a 64KB dictionary, which can be converted to an S2 dictionary like this:
+
+```Go
+	// Decode the Zstandard dictionary.
+	insp, err := zstd.InspectDictionary(zdict)
+	if err != nil {
+		panic(err)
+	}
+
+	// We are only interested in the contents.
+	// Assume that files start with "// Copyright (c) 2023".
+	// Search for the longest match for that.
+	// This may save a few bytes.
+	dict := s2.MakeDict(insp.Content(), []byte("// Copyright (c) 2023"))
+
+	// b := dict.Bytes() will provide a dictionary that can be saved
+	// and reloaded with s2.NewDict(b).
+
+	// We can now encode using this dictionary
+	encodedWithDict := dict.Encode(nil, payload)
+
+	// To decode content:
+	decoded, err := dict.Decode(nil, encodedWithDict)
+```
+
+It is recommended to save the dictionary returned by `b := dict.Bytes()`, since that will contain only the S2 dictionary.
+
+This dictionary can later be loaded using `s2.NewDict(b)`. The dictionary then no longer requires `zstd` to be initialized.
+
+Also note how `s2.MakeDict` allows you to search for a common starting sequence of your files.
+This can be omitted, at the expense of a few bytes.
+
+# Snappy Compatibility
+
+S2 now offers full compatibility with Snappy.
+
+This means that the efficient encoders of S2 can be used to generate fully Snappy compatible output.
+
+There is a [snappy](https://github.com/klauspost/compress/tree/master/snappy) package that can be used by
+simply changing imports from `github.com/golang/snappy` to `github.com/klauspost/compress/snappy`.
+This uses "better" mode for all operations.
+If you would like more control, you can use the s2 package as described below:
+
+## Blocks
+
+Snappy compatible blocks can be generated with the S2 encoder.
+Compression and speed are typically a bit better, and `MaxEncodedLen` is also smaller, for lower memory usage. Replace:
+
+| Snappy                    | S2 replacement        |
+|---------------------------|-----------------------|
+| snappy.Encode(...)        | s2.EncodeSnappy(...)  |
+| snappy.MaxEncodedLen(...) | s2.MaxEncodedLen(...) |
+
+`s2.EncodeSnappy` can be replaced with `s2.EncodeSnappyBetter` or `s2.EncodeSnappyBest` to get more efficiently compressed Snappy compatible output.
+
+`s2.ConcatBlocks` is compatible with Snappy blocks.
+
+Comparison of [`webdevdata.org-2015-01-07-subset`](https://files.klauspost.com/compress/webdevdata.org-2015-01-07-4GB-subset.7z),
+53927 files, total input size: 4,014,735,833 bytes. amd64, single goroutine used:
+
+| Encoder               | Size       | MB/s       | Reduction  |
+|-----------------------|------------|------------|------------|
+| snappy.Encode         | 1128706759 | 725.59     | 71.89%     |
+| s2.EncodeSnappy       | 1093823291 | **899.16** | 72.75%     |
+| s2.EncodeSnappyBetter | 1001158548 | 578.49     | 75.06%     |
+| s2.EncodeSnappyBest   | 944507998  | 66.00      | **76.47%** |
+
+## Streams
+
+For streams, replace `enc = snappy.NewBufferedWriter(w)` with `enc = s2.NewWriter(w, s2.WriterSnappyCompat())`.
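+A minimal sketch of the swap (the function name and error handling are illustrative):
+
+```Go
+// compressSnappyCompat writes r to w as a Snappy compatible framed stream,
+// produced by the faster S2 encoder.
+func compressSnappyCompat(w io.Writer, r io.Reader) error {
+	enc := s2.NewWriter(w, s2.WriterSnappyCompat())
+	if _, err := io.Copy(enc, r); err != nil {
+		enc.Close()
+		return err
+	}
+	// Close flushes any buffered data and finishes the stream.
+	return enc.Close()
+}
+```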
+All other options are available, but note that the block size limit is different for Snappy.
+
+Comparison of different streams, AMD Ryzen 3950x, 16 cores. Size and throughput:
+
+| File                        | snappy.NewWriter         | S2 Snappy                 | S2 Snappy, Better        | S2 Snappy, Best         |
+|-----------------------------|--------------------------|---------------------------|--------------------------|-------------------------|
+| nyc-taxi-data-10M.csv       | 1316042016 - 539.47MB/s  | 1307003093 - 10132.73MB/s | 1174534014 - 5002.44MB/s | 1115904679 - 177.97MB/s |
+| enwik10 (xml)               | 5088294643 - 451.13MB/s  | 5175840939 - 9440.69MB/s  | 4560784526 - 4487.21MB/s | 4340299103 - 158.92MB/s |
+| 10gb.tar (mixed)            | 6056946612 - 729.73MB/s  | 6208571995 - 9978.05MB/s  | 5741646126 - 4919.98MB/s | 5548973895 - 180.44MB/s |
+| github-june-2days-2019.json | 1525176492 - 933.00MB/s  | 1476519054 - 13150.12MB/s | 1400547532 - 5803.40MB/s | 1321887137 - 204.29MB/s |
+| consensus.db.10gb (db)      | 5412897703 - 1102.14MB/s | 5354073487 - 13562.91MB/s | 5335069899 - 5294.73MB/s | 5201000954 - 175.72MB/s |
+
+## Decompression
+
+All decompression functions map directly to equivalent s2 functions.
+
+| Snappy                 | S2 replacement     |
+|------------------------|--------------------|
+| snappy.Decode(...)     | s2.Decode(...)     |
+| snappy.DecodedLen(...) | s2.DecodedLen(...) |
+| snappy.NewReader(...)  | s2.NewReader(...)  |
+
+Features like [quick forward skipping without decompression](https://pkg.go.dev/github.com/klauspost/compress/s2#Reader.Skip)
+are also available for Snappy streams.
+
+If you know you are only decompressing Snappy streams, setting [`ReaderMaxBlockSize(64<<10)`](https://pkg.go.dev/github.com/klauspost/compress/s2#ReaderMaxBlockSize)
+on your Reader will reduce memory consumption.
+
+# Concatenating blocks and streams
+
+Concatenating streams will concatenate the output of both without recompressing them.
+While this is inefficient in terms of compression, it might be usable in certain scenarios.
+The 10-byte 'stream identifier' of the second stream can optionally be stripped, but it is not a requirement.
+
+Blocks can be concatenated using the `ConcatBlocks` function.
+
+Snappy blocks/streams can safely be concatenated with S2 blocks and streams.
+Streams with indexes (see below) will currently not work on concatenated streams.
+
+# Stream Seek Index
+
+S2 and Snappy streams can have indexes. These indexes will allow random seeking within the compressed data.
+
+The index can either be appended to the stream as a skippable block or returned for separate storage.
+
+When the index is appended to a stream, it will be skipped by regular decoders,
+so the output remains compatible with other decoders.
+
+## Creating an Index
+
+To automatically add an index to a stream, add the `WriterAddIndex()` option to your writer.
+Then the index will be added to the stream when `Close()` is called.
+
+```
+	// Add Index to stream...
+	enc := s2.NewWriter(w, s2.WriterAddIndex())
+	io.Copy(enc, r)
+	enc.Close()
+```
+
+If you want to store the index separately, you can use `CloseIndex()` instead of the regular `Close()`.
+This will return the index. Note that `CloseIndex()` should only be called once, and you shouldn't call `Close()`.
+
+```
+	// Get index for separate storage...
+	enc := s2.NewWriter(w)
+	io.Copy(enc, r)
+	index, err := enc.CloseIndex()
+```
+
+The `index` can then be used without needing to read it back from the stream.
+This means the index can be used without seeking to the end of the stream,
+or for manually forwarding streams.
See below.
+
+Finally, an existing S2/Snappy stream can be indexed using the `s2.IndexStream(r io.Reader)` function.
+
+## Using Indexes
+
+To use indexes, there is a `ReadSeeker(random bool, index []byte) (*ReadSeeker, error)` function available.
+
+Calling `ReadSeeker` will return an [io.ReadSeeker](https://pkg.go.dev/io#ReadSeeker) compatible version of the reader.
+
+If 'random' is specified, the returned io.Seeker can be used for random seeking, otherwise only forward seeking is supported.
+Enabling random seeking requires the original input to support the [io.Seeker](https://pkg.go.dev/io#Seeker) interface.
+
+```
+	dec := s2.NewReader(r)
+	rs, err := dec.ReadSeeker(false, nil)
+	rs.Seek(wantOffset, io.SeekStart)
+```
+
+This gets a seeker that can seek forward. Since no index is provided, the index is read from the stream.
+This requires that an index was added and that `r` supports the [io.Seeker](https://pkg.go.dev/io#Seeker) interface.
+
+A custom index can be specified, which will be used if supplied.
+When using a custom index, it will not be read from the input stream.
+
+```
+	dec := s2.NewReader(r)
+	rs, err := dec.ReadSeeker(false, index)
+	rs.Seek(wantOffset, io.SeekStart)
+```
+
+This will read the index from `index`. Since we specify non-random (forward-only) seeking, `r` does not have to be an io.Seeker.
+
+```
+	dec := s2.NewReader(r)
+	rs, err := dec.ReadSeeker(true, index)
+	rs.Seek(wantOffset, io.SeekStart)
+```
+
+Finally, since we specify that we want to do random seeking, `r` must be an io.Seeker.
+
+The returned [ReadSeeker](https://pkg.go.dev/github.com/klauspost/compress/s2#ReadSeeker) contains a shallow reference to the existing Reader,
+meaning changes performed to one are reflected in the other.
+
+To check if a stream contains an index at the end, the `(*Index).LoadStream(rs io.ReadSeeker) error` method can be used.
+
+## Manually Forwarding Streams
+
+Indexes can also be read outside the decoder using the [Index](https://pkg.go.dev/github.com/klauspost/compress/s2#Index) type.
+This can be used for parsing indexes, either stored separately or embedded in streams.
+
+In some cases it may not be possible to serve a seekable stream.
+This can for instance be an HTTP stream, where the Range request
+is sent at the start of the stream.
+
+With a little bit of extra code, it is still possible to use indexes
+to forward to a specific offset with a single forward skip.
+
+It is possible to load the index manually like this:
+```
+	var index s2.Index
+	_, err = index.Load(idxBytes)
+```
+
+This can be used to figure out how much to offset the compressed stream:
+
+```
+	compressedOffset, uncompressedOffset, err := index.Find(wantOffset)
+```
+
+The `compressedOffset` is the number of bytes that should be skipped
+from the beginning of the compressed file.
+
+The `uncompressedOffset` will then be the offset of the uncompressed bytes returned
+when decoding from that position. This will always be <= wantOffset.
+
+When creating a decoder, it must be specified that it should *not* expect a stream identifier
+at the beginning of the stream. Assuming the io.Reader `r` has been forwarded to `compressedOffset`,
+we create the decoder like this:
+
+```
+	dec := s2.NewReader(r, s2.ReaderIgnoreStreamIdentifier())
+```
+
+We are not completely done. We still need to forward the stream past the uncompressed bytes we didn't want.
+This is done using the regular "Skip" function:
+
+```
+	err = dec.Skip(wantOffset - uncompressedOffset)
+```
+
+This will ensure that we are at exactly the offset we want, and reading from `dec` will start at the requested offset.
+
+# Compact storage
+
+For compact storage, [RemoveIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RemoveIndexHeaders) can be used to remove any redundant info from
+a serialized index. If you remove the header, it must be restored before [Loading](https://pkg.go.dev/github.com/klauspost/compress/s2#Index.Load).
+
+This is expected to save 20 bytes. These can be restored using [RestoreIndexHeaders](https://pkg.go.dev/github.com/klauspost/compress/s2#RestoreIndexHeaders). This removes a layer of security, but is the most compact representation. It returns nil if the headers contain errors.
+
+## Index Format
+
+Each block is structured as a Snappy skippable block, with the chunk ID 0x99.
+
+The block can be read from the front, but contains information so it can be read from the back as well.
+
+Numbers are stored as fixed-size little-endian values or [zigzag encoded](https://developers.google.com/protocol-buffers/docs/encoding#signed_integers) [base 128 varints](https://developers.google.com/protocol-buffers/docs/encoding),
+with an un-encoded value length of 64 bits, unless other limits are specified.
+
+| Content                              | Format                                                                                                                        |
+|--------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
+| ID, `[1]byte`                        | Always 0x99.                                                                                                                  |
+| Data Length, `[3]byte`               | 3 byte little-endian length of the chunk in bytes, following this.                                                            |
+| Header `[6]byte`                     | Header, must be `[115, 50, 105, 100, 120, 0]` or in text: "s2idx\x00".                                                        |
+| UncompressedSize, Varint             | Total Uncompressed size.                                                                                                      |
+| CompressedSize, Varint               | Total Compressed size if known. Should be -1 if unknown.                                                                      |
+| EstBlockSize, Varint                 | Block Size, used for guessing uncompressed offsets. Must be >= 0.                                                             |
+| Entries, Varint                      | Number of Entries in index, must be < 65536 and >= 0.                                                                         |
+| HasUncompressedOffsets `byte`        | 0 if no uncompressed offsets are present, 1 if present. Other values are invalid.                                             |
+| UncompressedOffsets, [Entries]VarInt | Uncompressed offsets. See below how to decode.                                                                                |
+| CompressedOffsets, [Entries]VarInt   | Compressed offsets. See below how to decode.                                                                                  |
+| Block Size, `[4]byte`                | Little Endian total encoded size (including header and trailer). Can be used for searching backwards to start of block.       |
+| Trailer `[6]byte`                    | Trailer, must be `[0, 120, 100, 105, 50, 115]` or in text: "\x00xdi2s". Can be used for identifying block from end of stream. |
+
+For regular streams the uncompressed offsets are fully predictable,
+so `HasUncompressedOffsets` allows specifying that compressed blocks all have
+exactly `EstBlockSize` bytes of uncompressed content.
+
+Entries *must* be in order, starting with the lowest offset,
+and there *must* be no uncompressed offset duplicates.
+Entries *may* point to the start of a skippable block,
+but it is then not allowed to also have an entry for the next block since
+that would give an uncompressed offset duplicate.
+
+There is no requirement for all blocks to be represented in the index.
+In fact, there is a maximum of 65536 block entries in an index.
+
+The writer can use any method to reduce the number of entries.
+An implicit block start at 0,0 can be assumed.
+
+### Decoding entries
+
+```
+// Read Uncompressed entries.
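+// Offsets are stored as small deltas from a prediction, which keeps the varints short.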
+// Each assumes EstBlockSize delta from previous.
+for each entry {
+    uOff = 0
+    if HasUncompressedOffsets == 1 {
+        uOff = ReadVarInt // Read value from stream
+    }
+
+    // Except for the first entry, use previous values.
+    if entryNum == 0 {
+        entry[entryNum].UncompressedOffset = uOff
+        continue
+    }
+
+    // Uncompressed uses previous offset and adds EstBlockSize
+    entry[entryNum].UncompressedOffset = entry[entryNum-1].UncompressedOffset + EstBlockSize + uOff
+}
+
+
+// Guess that the first block will be 50% of uncompressed size.
+// Integer truncating division must be used.
+CompressGuess := EstBlockSize / 2
+
+// Read Compressed entries.
+// Each assumes CompressGuess delta from previous.
+// CompressGuess is adjusted for each value.
+for each entry {
+    cOff = ReadVarInt // Read value from stream
+
+    // Except for the first entry, use previous values.
+    if entryNum == 0 {
+        entry[entryNum].CompressedOffset = cOff
+        continue
+    }
+
+    // Compressed uses previous and our estimate.
+    entry[entryNum].CompressedOffset = entry[entryNum-1].CompressedOffset + CompressGuess + cOff
+
+    // Adjust compressed offset for next loop, integer truncating division must be used.
+    CompressGuess += cOff/2
+}
+```
+
+To decode from any given uncompressed offset `(wantOffset)`:
+
+* Iterate entries until `entry[n].UncompressedOffset > wantOffset`.
+* Start decoding from `entry[n-1].CompressedOffset`.
+* Discard `wantOffset - entry[n-1].UncompressedOffset` bytes from the decoded stream.
+
+See [using indexes](https://github.com/klauspost/compress/tree/master/s2#using-indexes) for functions that perform the operations with a simpler interface.
+
+
+# Format Extensions
+
+* Frame [Stream identifier](https://github.com/google/snappy/blob/master/framing_format.txt#L68) changed from `sNaPpY` to `S2sTwO`.
+* [Framed compressed blocks](https://github.com/google/snappy/blob/master/format_description.txt) can be up to 4MB (up from 64KB).
+* Compressed blocks can have an offset of `0`, which indicates to repeat the last seen offset.
+
+Repeat offsets must be encoded as a [2.2.1. Copy with 1-byte offset (01)](https://github.com/google/snappy/blob/master/format_description.txt#L89), where the offset is 0.
+
+The length is specified by reading the 3-bit length field in the tag and decoding it using this table:
+
+| Length | Actual Length        |
+|--------|----------------------|
+| 0      | 4                    |
+| 1      | 5                    |
+| 2      | 6                    |
+| 3      | 7                    |
+| 4      | 8                    |
+| 5      | 8 + read 1 byte      |
+| 6      | 260 + read 2 bytes   |
+| 7      | 65540 + read 3 bytes |
+
+This allows any repeat offset + length to be represented by 2 to 5 bytes.
+It also allows emitting matches longer than 64 bytes with one copy + one repeat instead of several 64-byte copies.
+
+Lengths are stored as little-endian values.
+
+The first copy of a block cannot be a repeat offset, and the offset is reset on every block in streams.
+
+The default streaming block size is 1MB.
+
+# Dictionary Encoding
+
+Adding dictionaries allows providing a custom dictionary that will serve as a lookup table at the beginning of blocks.
+
+A dictionary provides an initial repeat value that can be used to point to a common header.
+
+Other than that, the dictionary contains values that can be used as back-references.
+
+Often-used data should be placed at the *end* of the dictionary, since offsets < 2048 bytes will be smaller.
+
+## Format
+
+Dictionary *content* must be at least 16 bytes and at most 64KiB (65536 bytes).
+
+Encoding: `[repeat value (uvarint)][dictionary content...]`
+
+Before the dictionary content comes an unsigned base-128 (uvarint) encoded value specifying the initial repeat offset.
+This value is an offset into the dictionary content and not a back-reference offset,
+so setting this to 0 will make the repeat value point to the first value of the dictionary.
+
+The value must be less than the dictionary length minus 8.
+
+## Encoding
+
+From the decoder's point of view, the dictionary content is seen as preceding the decoded output.
+
+`[dictionary content][decoded output]`
+
+Backreferences to the dictionary are encoded as ordinary backreferences that have an offset before the start of the decoded block.
+
+Matches copying from the dictionary are **not** allowed to cross from the dictionary into the decoded data.
+However, if a copy ends at the end of the dictionary, the next repeat will point to the start of the decoded buffer, which is allowed.
+
+The first match can be a repeat value, which will use the repeat offset stored in the dictionary.
+
+When 64KB (65536 bytes) has been en/decoded, it is no longer allowed to reference the dictionary,
+neither by copy nor by repeat operations.
+If the boundary is crossed while copying from the dictionary, the operation should complete,
+but the next instruction is not allowed to reference the dictionary.
+
+Valid blocks encoded *without* a dictionary can be decoded with any dictionary.
+There are no checks whether the supplied dictionary is the correct one for a block.
+Because of this, there is no overhead from using a dictionary.
+
+## Example
+
+This is the dictionary content. Elements are separated by `[]`.
+
+Dictionary: `[0x0a][Yesterday 25 bananas were added to Benjamins brown bag]`.
+
+The initial repeat offset is set at 10, which is the character `2`.
+
+Encoded: `[LIT "10"][REPEAT len=10][LIT "hich"][MATCH off=50 len=6][MATCH off=31 len=6][MATCH off=61 len=10]`
+
+Decoded: `[10][ bananas w][hich][ were ][brown ][were added]`
+
+Output: `10 bananas which were brown were added`
+
+
+## Streams
+
+For streams, each block can use the dictionary.
+
+However, the dictionary cannot currently be provided on the stream.
+
+
+# LICENSE
+
+This code is based on the [Snappy-Go](https://github.com/golang/snappy) implementation.
+
+Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go
new file mode 100644
index 00000000000..6c7feafcc66
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode.go
@@ -0,0 +1,437 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"strconv"
+)
+
+var (
+	// ErrCorrupt reports that the input is invalid.
+	ErrCorrupt = errors.New("s2: corrupt input")
+	// ErrCRC reports that the input failed CRC validation (streams only)
+	ErrCRC = errors.New("s2: corrupt input, crc mismatch")
+	// ErrTooLarge reports that the uncompressed length is too large.
+	ErrTooLarge = errors.New("s2: decoded block is too large")
+	// ErrUnsupported reports that the input isn't supported.
+	ErrUnsupported = errors.New("s2: unsupported input")
+)
+
+// DecodedLen returns the length of the decoded block.
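+// It returns ErrCorrupt if the length header cannot be read and
+// ErrTooLarge if the decoded length does not fit an int on 32-bit platforms.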
+func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= cap(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + if s2Decode(dst, src[s:]) != 0 { + return nil, ErrCorrupt + } + return dst, nil +} + +// s2DecodeDict writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func s2DecodeDict(dst, src []byte, dict *Dict) int { + if dict == nil { + return s2Decode(dst, src) + } + const debug = false + const debugErrs = debug + + if debug { + fmt.Println("Starting decode, dst len:", len(dst)) + } + var d, s, length int + offset := len(dict.dict) - dict.repeat + + // As long as we can read at least 5 bytes... + for s < len(src)-5 { + // Removing bounds checks is SLOWER, when if doing + // in := src[s:s+5] + // Checked on Go 1.18 + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + x = uint32(src[s-1]) + case x == 61: + in := src[s : s+3] + x = uint32(in[1]) | uint32(in[2])<<8 + s += 3 + case x == 62: + in := src[s : s+4] + // Load as 32 bit and shift down. 
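+				// The tag byte is included in the 32-bit load; the shift below discards it.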
+ x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x >>= 8 + s += 4 + case x == 63: + in := src[s : s+5] + x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24 + s += 5 + } + length = int(x) + 1 + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debugErrs { + fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s) + } + return decodeErrCodeCorrupt + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + length = int(src[s-2]) >> 2 & 0x7 + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + length = int(src[s]) + 4 + s += 1 + case 6: + in := src[s : s+2] + length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8) + s += 2 + case 7: + in := src[s : s+3] + length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16) + s += 3 + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + in := src[s : s+3] + offset = int(uint32(in[1]) | uint32(in[2])<<8) + length = 1 + int(in[0])>>2 + s += 3 + + case tagCopy4: + in := src[s : s+5] + offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24) + length = 1 + int(in[0])>>2 + s += 5 + } + + if offset <= 0 || length > len(dst)-d { + if debugErrs { + fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d) + } + return decodeErrCodeCorrupt + } + + // copy from dict + if d < offset { + if d > MaxDictSrcOffset { + if debugErrs { + fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length) + } + return decodeErrCodeCorrupt + } + startOff := len(dict.dict) - offset + d + if startOff < 0 || startOff+length > len(dict.dict) { + if debugErrs { + fmt.Printf("offset (%d) + length (%d) bigger than dict (%d)\n", offset, length, len(dict.dict)) + } + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("dict copy, length:", length, "offset:", offset, "d-after:", d+length, "dict start offset:", startOff) + } + copy(dst[d:d+length], dict.dict[startOff:]) + d += length + continue + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + // Remaining with extra checks... + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debugErrs { + fmt.Println("corrupt literal: length:", length, "d-left:", len(dst)-d, "src-left:", len(src)-s) + } + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(src[s-2]) >> 2 & 0x7 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + s += 1 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-1])) + 4 + case 6: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) + case 7: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ if debugErrs { + fmt.Println("src went oob") + } + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || length > len(dst)-d { + if debugErrs { + fmt.Println("match error; offset:", offset, "length:", length, "dst-left:", len(dst)-d) + } + return decodeErrCodeCorrupt + } + + // copy from dict + if d < offset { + if d > MaxDictSrcOffset { + if debugErrs { + fmt.Println("dict after", MaxDictSrcOffset, "d:", d, "offset:", offset, "length:", length) + } + return decodeErrCodeCorrupt + } + rOff := len(dict.dict) - (offset - d) + if debug { + fmt.Println("starting dict entry from dict offset", len(dict.dict)-rOff) + } + if rOff+length > len(dict.dict) { + if debugErrs { + fmt.Println("err: END offset", rOff+length, "bigger than dict", len(dict.dict), "dict offset:", rOff, "length:", length) + } + return decodeErrCodeCorrupt + } + if rOff < 0 { + if debugErrs { + fmt.Println("err: START offset", rOff, "less than 0", len(dict.dict), "dict offset:", rOff, "length:", length) + } + return decodeErrCodeCorrupt + } + copy(dst[d:d+length], dict.dict[rOff:]) + d += length + continue + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + if d != len(dst) { + if debugErrs { + fmt.Println("wanted length", len(dst), "got", d) + } + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/s2/decode_amd64.s b/vendor/github.com/klauspost/compress/s2/decode_amd64.s new file mode 100644 index 00000000000..9b105e03c59 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_amd64.s @@ -0,0 +1,568 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +#define R_TMP0 AX +#define R_TMP1 BX +#define R_LEN CX +#define R_OFF DX +#define R_SRC SI +#define R_DST DI +#define R_DBASE R8 +#define R_DLEN R9 +#define R_DEND R10 +#define R_SBASE R11 +#define R_SLEN R12 +#define R_SEND R13 +#define R_TMP2 R14 +#define R_TMP3 R15 + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. 
The register allocation: +// - R_TMP0 scratch +// - R_TMP1 scratch +// - R_LEN length or x (shared) +// - R_OFF offset +// - R_SRC &src[s] +// - R_DST &dst[d] +// + R_DBASE dst_base +// + R_DLEN dst_len +// + R_DEND dst_base + dst_len +// + R_SBASE src_base +// + R_SLEN src_len +// + R_SEND src_base + src_len +// - R_TMP2 used by doCopy +// - R_TMP3 used by doCopy +// +// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. +// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. +TEXT ·s2Decode(SB), NOSPLIT, $48-56 + // Initialize R_SRC, R_DST and R_DBASE-R_SEND. + MOVQ dst_base+0(FP), R_DBASE + MOVQ dst_len+8(FP), R_DLEN + MOVQ R_DBASE, R_DST + MOVQ R_DBASE, R_DEND + ADDQ R_DLEN, R_DEND + MOVQ src_base+24(FP), R_SBASE + MOVQ src_len+32(FP), R_SLEN + MOVQ R_SBASE, R_SRC + MOVQ R_SBASE, R_SEND + ADDQ R_SLEN, R_SEND + XORQ R_OFF, R_OFF + +loop: + // for s < len(src) + CMPQ R_SRC, R_SEND + JEQ end + + // R_LEN = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (R_SRC), R_LEN + MOVL R_LEN, R_TMP1 + ANDL $3, R_TMP1 + CMPL R_TMP1, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, R_LEN + CMPL R_LEN, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ R_SRC + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that R_LEN == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // R_LEN can hold 64 bits, so the increment cannot overflow. + INCQ R_LEN + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // R_TMP0 = len(dst) - d + // R_TMP1 = len(src) - s + MOVQ R_DEND, R_TMP0 + SUBQ R_DST, R_TMP0 + MOVQ R_SEND, R_TMP1 + SUBQ R_SRC, R_TMP1 + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ R_LEN, $16 + JGT callMemmove + CMPQ R_TMP0, $16 + JLT callMemmove + CMPQ R_TMP1, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. 
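+	// MOVOU performs an unaligned 16-byte SSE load/store.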
+ MOVOU 0(R_SRC), X0 + MOVOU X0, 0(R_DST) + + // d += length + // s += length + ADDQ R_LEN, R_DST + ADDQ R_LEN, R_SRC + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ R_LEN, R_TMP0 + JGT errCorrupt + CMPQ R_LEN, R_TMP1 + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ R_DST, 0(SP) + MOVQ R_SRC, 8(SP) + MOVQ R_LEN, 16(SP) + MOVQ R_DST, 24(SP) + MOVQ R_SRC, 32(SP) + MOVQ R_LEN, 40(SP) + MOVQ R_OFF, 48(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R_DBASE-R_SEND. + MOVQ 24(SP), R_DST + MOVQ 32(SP), R_SRC + MOVQ 40(SP), R_LEN + MOVQ 48(SP), R_OFF + MOVQ dst_base+0(FP), R_DBASE + MOVQ dst_len+8(FP), R_DLEN + MOVQ R_DBASE, R_DEND + ADDQ R_DLEN, R_DEND + MOVQ src_base+24(FP), R_SBASE + MOVQ src_len+32(FP), R_SLEN + MOVQ R_SBASE, R_SEND + ADDQ R_SLEN, R_SEND + + // d += length + // s += length + ADDQ R_LEN, R_DST + ADDQ R_LEN, R_SRC + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ R_LEN, R_SRC + SUBQ $58, R_SRC + CMPQ R_SRC, R_SEND + JA errCorrupt + + // case x == 60: + CMPL R_LEN, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(R_SRC), R_LEN + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(R_SRC), R_LEN + JMP doLit + +tagLit62Plus: + CMPL R_LEN, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + // We read one byte, safe to read one back, since we are just reading tag. + // x = binary.LittleEndian.Uint32(src[s-1:]) >> 8 + MOVL -4(R_SRC), R_LEN + SHRL $8, R_LEN + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(R_SRC), R_LEN + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, R_LEN + INCQ R_LEN + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(R_SRC), R_OFF + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, R_SRC + + // if uint(s) > uint(len(src)) { etc } + CMPQ R_SRC, R_SEND + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, R_LEN + INCQ R_LEN + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(R_SRC), R_OFF + JMP doCopy + +tagCopy: + // We have a copy tag. 
+	// - R_TMP1 == src[s] & 0x03
+	// - R_LEN == src[s]
+	CMPQ R_TMP1, $2
+	JEQ tagCopy2
+	JA tagCopy4
+
+	// case tagCopy1:
+	// s += 2
+	ADDQ $2, R_SRC
+
+	// if uint(s) > uint(len(src)) { etc }
+	CMPQ R_SRC, R_SEND
+	JA errCorrupt
+
+	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+	// length = 4 + int(src[s-2])>>2&0x7
+	MOVBQZX -1(R_SRC), R_TMP1
+	MOVQ R_LEN, R_TMP0
+	SHRQ $2, R_LEN
+	ANDQ $0xe0, R_TMP0
+	ANDQ $7, R_LEN
+	SHLQ $3, R_TMP0
+	ADDQ $4, R_LEN
+	ORQ R_TMP1, R_TMP0
+
+	// Check if this is a repeat code; the ZF flag was set by the ORQ above.
+	JZ repeatCode
+
+	// This is a regular copy; transfer our temporary value to R_OFF (the offset).
+	MOVQ R_TMP0, R_OFF
+	JMP doCopy
+
+// This is a repeat code.
+repeatCode:
+	// If length < 9, reuse last offset, with the length already calculated.
+	CMPQ R_LEN, $9
+	JL doCopyRepeat
+
+	// Read additional bytes for length.
+	JE repeatLen1
+
+	// Rare, so the extra branch shouldn't hurt too much.
+	CMPQ R_LEN, $10
+	JE repeatLen2
+	JMP repeatLen3
+
+// Read repeat lengths.
+repeatLen1:
+	// s++
+	ADDQ $1, R_SRC
+
+	// if uint(s) > uint(len(src)) { etc }
+	CMPQ R_SRC, R_SEND
+	JA errCorrupt
+
+	// length = src[s-1] + 8
+	MOVBQZX -1(R_SRC), R_LEN
+	ADDL $8, R_LEN
+	JMP doCopyRepeat
+
+repeatLen2:
+	// s += 2
+	ADDQ $2, R_SRC
+
+	// if uint(s) > uint(len(src)) { etc }
+	CMPQ R_SRC, R_SEND
+	JA errCorrupt
+
+	// length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + 260
+	MOVWQZX -2(R_SRC), R_LEN
+	ADDL $260, R_LEN
+	JMP doCopyRepeat
+
+repeatLen3:
+	// s += 3
+	ADDQ $3, R_SRC
+
+	// if uint(s) > uint(len(src)) { etc }
+	CMPQ R_SRC, R_SEND
+	JA errCorrupt
+
+	// length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + 65540
+	// Read one byte further back (just part of the tag, shifted out)
+	MOVL -4(R_SRC), R_LEN
+	SHRL $8, R_LEN
+	ADDL $65540, R_LEN
+	JMP doCopyRepeat
+
+doCopy:
+	// This is the end of the outer "switch", when we have a copy tag.
+	//
+	// We assume that:
+	// - R_LEN == length && R_LEN > 0
+	// - R_OFF == offset
+
+	// if d < offset { etc }
+	MOVQ R_DST, R_TMP1
+	SUBQ R_DBASE, R_TMP1
+	CMPQ R_TMP1, R_OFF
+	JLT errCorrupt
+
+	// Repeat values can skip the test above, since any offset > 0 will be in dst.
+doCopyRepeat:
+	// if offset <= 0 { etc }
+	CMPQ R_OFF, $0
+	JLE errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVQ R_DEND, R_TMP1
+	SUBQ R_DST, R_TMP1
+	CMPQ R_LEN, R_TMP1
+	JGT errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	// - R_TMP2 = len(dst)-d
+	// - R_TMP3 = &dst[d-offset]
+	MOVQ R_DEND, R_TMP2
+	SUBQ R_DST, R_TMP2
+	MOVQ R_DST, R_TMP3
+	SUBQ R_OFF, R_TMP3
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//   goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMPQ R_LEN, $16
+	JGT slowForwardCopy
+	CMPQ R_OFF, $8
+	JLT slowForwardCopy
+	CMPQ R_TMP2, $16
+	JLT slowForwardCopy
+	MOVQ 0(R_TMP3), R_TMP0
+	MOVQ R_TMP0, 0(R_DST)
+	MOVQ 8(R_TMP3), R_TMP1
+	MOVQ R_TMP1, 8(R_DST)
+	ADDQ R_LEN, R_DST
+	JMP loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load/stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes. However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//	abxxxxxxxxxxxx
+	//	[------]           d-offset
+	//	  [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//	ababxxxxxxxxxx
+	//	[------]           d-offset
+	//	    [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//   goto verySlowForwardCopy
+	// }
+	SUBQ $10, R_TMP2
+	CMPQ R_LEN, R_TMP2
+	JGT verySlowForwardCopy
+
+	// We want to keep the offset, so we use R_TMP2 from here.
+	MOVQ R_OFF, R_TMP2
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+	//
+	// for offset < 8 {
+	//   copy 8 bytes from dst[d-offset:] to dst[d:]
+	//   length -= offset
+	//   d += offset
+	//   offset += offset
+	//   // The two previous lines together mean that d-offset, and therefore
+	//   // R_TMP3, is unchanged.
+	// }
+	CMPQ R_TMP2, $8
+	JGE fixUpSlowForwardCopy
+	MOVQ (R_TMP3), R_TMP1
+	MOVQ R_TMP1, (R_DST)
+	SUBQ R_TMP2, R_LEN
+	ADDQ R_TMP2, R_DST
+	ADDQ R_TMP2, R_TMP2
+	JMP makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+	// !!! Add length (which might be negative now) to d (implied by R_DST being
+	// &dst[d]) so that d ends up at the right place when we jump back to the
+	// top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if
+	// length is positive, copying the remaining length bytes will write to the
+	// right place.
+	MOVQ R_DST, R_TMP0
+	ADDQ R_LEN, R_DST
+
+finishSlowForwardCopy:
+	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+	// length means that we overran, but as above, that will be fixed up by
+	// subsequent iterations of the outermost loop.
+	CMPQ R_LEN, $0
+	JLE loop
+	MOVQ (R_TMP3), R_TMP1
+	MOVQ R_TMP1, (R_TMP0)
+	ADDQ $8, R_TMP3
+	ADDQ $8, R_TMP0
+	SUBQ $8, R_LEN
+	JMP finishSlowForwardCopy
+
+verySlowForwardCopy:
+	// verySlowForwardCopy is a simple implementation of forward copy. In C
+	// parlance, this is a do/while loop instead of a while loop, since we know
+	// that length > 0.
In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R_TMP3), R_TMP1 + MOVB R_TMP1, (R_DST) + INCQ R_TMP3 + INCQ R_DST + DECQ R_LEN + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ R_DST, R_DEND + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/s2/decode_arm64.s b/vendor/github.com/klauspost/compress/s2/decode_arm64.s new file mode 100644 index 00000000000..4b63d5086a9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_arm64.s @@ -0,0 +1,574 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +#define R_TMP0 R2 +#define R_TMP1 R3 +#define R_LEN R4 +#define R_OFF R5 +#define R_SRC R6 +#define R_DST R7 +#define R_DBASE R8 +#define R_DLEN R9 +#define R_DEND R10 +#define R_SBASE R11 +#define R_SLEN R12 +#define R_SEND R13 +#define R_TMP2 R14 +#define R_TMP3 R15 + +// TEST_SRC will check if R_SRC is <= SRC_END +#define TEST_SRC() \ + CMP R_SEND, R_SRC \ + BGT errCorrupt + +// MOVD R_SRC, R_TMP1 +// SUB R_SBASE, R_TMP1, R_TMP1 +// CMP R_SLEN, R_TMP1 +// BGT errCorrupt + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - R_TMP0 scratch +// - R_TMP1 scratch +// - R_LEN length or x +// - R_OFF offset +// - R_SRC &src[s] +// - R_DST &dst[d] +// + R_DBASE dst_base +// + R_DLEN dst_len +// + R_DEND dst_base + dst_len +// + R_SBASE src_base +// + R_SLEN src_len +// + R_SEND src_base + src_len +// - R_TMP2 used by doCopy +// - R_TMP3 used by doCopy +// +// The registers R_DBASE-R_SEND (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly R_DST - R_DBASE, and len(dst)-d is R_DEND - R_DST. +// The s variable is implicitly R_SRC - R_SBASE, and len(src)-s is R_SEND - R_SRC. +TEXT ·s2Decode(SB), NOSPLIT, $56-64 + // Initialize R_SRC, R_DST and R_DBASE-R_SEND. + MOVD dst_base+0(FP), R_DBASE + MOVD dst_len+8(FP), R_DLEN + MOVD R_DBASE, R_DST + MOVD R_DBASE, R_DEND + ADD R_DLEN, R_DEND, R_DEND + MOVD src_base+24(FP), R_SBASE + MOVD src_len+32(FP), R_SLEN + MOVD R_SBASE, R_SRC + MOVD R_SBASE, R_SEND + ADD R_SLEN, R_SEND, R_SEND + MOVD $0, R_OFF + +loop: + // for s < len(src) + CMP R_SEND, R_SRC + BEQ end + + // R_LEN = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBU (R_SRC), R_LEN + MOVW R_LEN, R_TMP1 + ANDW $3, R_TMP1 + MOVW $1, R1 + CMPW R1, R_TMP1 + BGE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + MOVW $60, R1 + LSRW $2, R_LEN, R_LEN + CMPW R_LEN, R1 + BLS tagLit60Plus + + // case x < 60: + // s++ + ADD $1, R_SRC, R_SRC + +doLit: + // This is the end of the inner "switch", when we have a literal tag. 
+ // + // We assume that R_LEN == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // R_LEN can hold 64 bits, so the increment cannot overflow. + ADD $1, R_LEN, R_LEN + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // R_TMP0 = len(dst) - d + // R_TMP1 = len(src) - s + MOVD R_DEND, R_TMP0 + SUB R_DST, R_TMP0, R_TMP0 + MOVD R_SEND, R_TMP1 + SUB R_SRC, R_TMP1, R_TMP1 + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMP $16, R_LEN + BGT callMemmove + CMP $16, R_TMP0 + BLT callMemmove + CMP $16, R_TMP1 + BLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + LDP 0(R_SRC), (R_TMP2, R_TMP3) + STP (R_TMP2, R_TMP3), 0(R_DST) + + // d += length + // s += length + ADD R_LEN, R_DST, R_DST + ADD R_LEN, R_SRC, R_SRC + B loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMP R_TMP0, R_LEN + BGT errCorrupt + CMP R_TMP1, R_LEN + BGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // R_DST, R_SRC and R_LEN as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVD R_DST, 8(RSP) + MOVD R_SRC, 16(RSP) + MOVD R_LEN, 24(RSP) + MOVD R_DST, 32(RSP) + MOVD R_SRC, 40(RSP) + MOVD R_LEN, 48(RSP) + MOVD R_OFF, 56(RSP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R_DBASE-R_SEND. + MOVD 32(RSP), R_DST + MOVD 40(RSP), R_SRC + MOVD 48(RSP), R_LEN + MOVD 56(RSP), R_OFF + MOVD dst_base+0(FP), R_DBASE + MOVD dst_len+8(FP), R_DLEN + MOVD R_DBASE, R_DEND + ADD R_DLEN, R_DEND, R_DEND + MOVD src_base+24(FP), R_SBASE + MOVD src_len+32(FP), R_SLEN + MOVD R_SBASE, R_SEND + ADD R_SLEN, R_SEND, R_SEND + + // d += length + // s += length + ADD R_LEN, R_DST, R_DST + ADD R_LEN, R_SRC, R_SRC + B loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. 
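+	// (For example, x == 60 advances s by 2 here: one tag byte plus one
+	// length byte.)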
+	ADD R_LEN, R_SRC, R_SRC
+	SUB $58, R_SRC, R_SRC
+	TEST_SRC()
+
+	// case x == 60:
+	MOVW $61, R1
+	CMPW R1, R_LEN
+	BEQ tagLit61
+	BGT tagLit62Plus
+
+	// x = uint32(src[s-1])
+	MOVBU -1(R_SRC), R_LEN
+	B doLit
+
+tagLit61:
+	// case x == 61:
+	// x = uint32(src[s-2]) | uint32(src[s-1])<<8
+	MOVHU -2(R_SRC), R_LEN
+	B doLit
+
+tagLit62Plus:
+	CMPW $62, R_LEN
+	BHI tagLit63
+
+	// case x == 62:
+	// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+	MOVHU -3(R_SRC), R_LEN
+	MOVBU -1(R_SRC), R_TMP1
+	ORR R_TMP1<<16, R_LEN
+	B doLit
+
+tagLit63:
+	// case x == 63:
+	// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+	MOVWU -4(R_SRC), R_LEN
+	B doLit
+
+	// The code above handles literal tags.
+	// ----------------------------------------
+	// The code below handles copy tags.
+
+tagCopy4:
+	// case tagCopy4:
+	// s += 5
+	ADD $5, R_SRC, R_SRC
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVD R_SRC, R_TMP1
+	SUB R_SBASE, R_TMP1, R_TMP1
+	CMP R_SLEN, R_TMP1
+	BGT errCorrupt
+
+	// length = 1 + int(src[s-5])>>2
+	MOVD $1, R1
+	ADD R_LEN>>2, R1, R_LEN
+
+	// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+	MOVWU -4(R_SRC), R_OFF
+	B doCopy
+
+tagCopy2:
+	// case tagCopy2:
+	// s += 3
+	ADD $3, R_SRC, R_SRC
+
+	// if uint(s) > uint(len(src)) { etc }
+	TEST_SRC()
+
+	// length = 1 + int(src[s-3])>>2
+	MOVD $1, R1
+	ADD R_LEN>>2, R1, R_LEN
+
+	// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+	MOVHU -2(R_SRC), R_OFF
+	B doCopy
+
+tagCopy:
+	// We have a copy tag. We assume that:
+	// - R_TMP1 == src[s] & 0x03
+	// - R_LEN == src[s]
+	CMP $2, R_TMP1
+	BEQ tagCopy2
+	BGT tagCopy4
+
+	// case tagCopy1:
+	// s += 2
+	ADD $2, R_SRC, R_SRC
+
+	// if uint(s) > uint(len(src)) { etc }
+	TEST_SRC()
+
+	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+	// Calculate offset in R_TMP0 in case it is a repeat.
+	MOVD R_LEN, R_TMP0
+	AND $0xe0, R_TMP0
+	MOVBU -1(R_SRC), R_TMP1
+	ORR R_TMP0<<3, R_TMP1, R_TMP0
+
+	// length = 4 + int(src[s-2])>>2&0x7
+	MOVD $7, R1
+	AND R_LEN>>2, R1, R_LEN
+	ADD $4, R_LEN, R_LEN
+
+	// Check if this is a repeat code (offset 0).
+	CMP $0, R_TMP0
+	BEQ repeatCode
+
+	// This is a regular copy; transfer our temporary value to R_OFF (the offset).
+	MOVD R_TMP0, R_OFF
+	B doCopy
+
+	// This is a repeat code.
+repeatCode:
+	// If length < 9, reuse last offset, with the length already calculated.
+	CMP $9, R_LEN
+	BLT doCopyRepeat
+	BEQ repeatLen1
+	CMP $10, R_LEN
+	BEQ repeatLen2
+
+repeatLen3:
+	// s += 3
+	ADD $3, R_SRC, R_SRC
+
+	// if uint(s) > uint(len(src)) { etc }
+	TEST_SRC()
+
+	// length = uint32(src[s-3]) | (uint32(src[s-2])<<8) | (uint32(src[s-1])<<16) + 65540
+	MOVBU -1(R_SRC), R_TMP0
+	MOVHU -3(R_SRC), R_LEN
+	ORR R_TMP0<<16, R_LEN, R_LEN
+	ADD $65540, R_LEN, R_LEN
+	B doCopyRepeat
+
+repeatLen2:
+	// s += 2
+	ADD $2, R_SRC, R_SRC
+
+	// if uint(s) > uint(len(src)) { etc }
+	TEST_SRC()
+
+	// length = uint32(src[s-2]) | (uint32(src[s-1])<<8) + 260
+	MOVHU -2(R_SRC), R_LEN
+	ADD $260, R_LEN, R_LEN
+	B doCopyRepeat
+
+repeatLen1:
+	// s += 1
+	ADD $1, R_SRC, R_SRC
+
+	// if uint(s) > uint(len(src)) { etc }
+	TEST_SRC()
+
+	// length = src[s-1] + 8
+	MOVBU -1(R_SRC), R_LEN
+	ADD $8, R_LEN, R_LEN
+	B doCopyRepeat
+
+doCopy:
+	// This is the end of the outer "switch", when we have a copy tag.
+	//
+	// We assume that:
+	// - R_LEN == length && R_LEN > 0
+	// - R_OFF == offset
+
+	// if d < offset { etc }
+	MOVD R_DST, R_TMP1
+	SUB R_DBASE, R_TMP1, R_TMP1
+	CMP R_OFF, R_TMP1
+	BLT errCorrupt
+
+	// Repeat values can skip the test above, since any offset > 0 will be in dst.
+doCopyRepeat:
+
+	// if offset <= 0 { etc }
+	CMP $0, R_OFF
+	BLE errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVD R_DEND, R_TMP1
+	SUB R_DST, R_TMP1, R_TMP1
+	CMP R_TMP1, R_LEN
+	BGT errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	// - R_TMP2 = len(dst)-d
+	// - R_TMP3 = &dst[d-offset]
+	MOVD R_DEND, R_TMP2
+	SUB R_DST, R_TMP2, R_TMP2
+	MOVD R_DST, R_TMP3
+	SUB R_OFF, R_TMP3, R_TMP3
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//   goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMP $16, R_LEN
+	BGT slowForwardCopy
+	CMP $8, R_OFF
+	BLT slowForwardCopy
+	CMP $16, R_TMP2
+	BLT slowForwardCopy
+	MOVD 0(R_TMP3), R_TMP0
+	MOVD R_TMP0, 0(R_DST)
+	MOVD 8(R_TMP3), R_TMP1
+	MOVD R_TMP1, 8(R_DST)
+	ADD R_LEN, R_DST, R_DST
+	B loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load/stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes. However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//	abxxxxxxxxxxxx
+	//	[------]           d-offset
+	//	  [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//	ababxxxxxxxxxx
+	//	[------]           d-offset
+	//	    [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+ // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUB $10, R_TMP2, R_TMP2 + CMP R_TMP2, R_LEN + BGT verySlowForwardCopy + + // We want to keep the offset, so we use R_TMP2 from here. + MOVD R_OFF, R_TMP2 + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R_TMP3, is unchanged. + // } + CMP $8, R_TMP2 + BGE fixUpSlowForwardCopy + MOVD (R_TMP3), R_TMP1 + MOVD R_TMP1, (R_DST) + SUB R_TMP2, R_LEN, R_LEN + ADD R_TMP2, R_DST, R_DST + ADD R_TMP2, R_TMP2, R_TMP2 + B makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by R_DST being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save R_DST to R_TMP0 so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVD R_DST, R_TMP0 + ADD R_LEN, R_DST, R_DST + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + MOVD $0, R1 + CMP R1, R_LEN + BLE loop + MOVD (R_TMP3), R_TMP1 + MOVD R_TMP1, (R_TMP0) + ADD $8, R_TMP3, R_TMP3 + ADD $8, R_TMP0, R_TMP0 + SUB $8, R_LEN, R_LEN + B finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R_TMP3), R_TMP1 + MOVB R_TMP1, (R_DST) + ADD $1, R_TMP3, R_TMP3 + ADD $1, R_DST, R_DST + SUB $1, R_LEN, R_LEN + CBNZ R_LEN, verySlowForwardCopy + B loop + + // The code above handles copy tags. + // ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMP R_DEND, R_DST + BNE errCorrupt + + // return 0 + MOVD $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVD $1, R_TMP0 + MOVD R_TMP0, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/s2/decode_asm.go b/vendor/github.com/klauspost/compress/s2/decode_asm.go new file mode 100644 index 00000000000..cb3576edd47 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/decode_asm.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (amd64 || arm64) && !appengine && gc && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !noasm + +package s2 + +// decode has the same semantics as in decode_other.go. 
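+//
+// A hedged sketch of the calling convention (mirroring Dict.Decode later in
+// this diff; decodedLen parses the varint length header):
+//
+//	dLen, hdr, err := decodedLen(src)
+//	if err != nil {
+//		return nil, err
+//	}
+//	dst := make([]byte, dLen)
+//	if s2Decode(dst, src[hdr:]) != 0 {
+//		return nil, ErrCorrupt
+//	}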
+//
+//go:noescape
+func s2Decode(dst, src []byte) int
diff --git a/vendor/github.com/klauspost/compress/s2/decode_other.go b/vendor/github.com/klauspost/compress/s2/decode_other.go
new file mode 100644
index 00000000000..2cb55c2c775
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/decode_other.go
@@ -0,0 +1,292 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (!amd64 && !arm64) || appengine || !gc || noasm
+// +build !amd64,!arm64 appengine !gc noasm
+
+package s2
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func s2Decode(dst, src []byte) int {
+	const debug = false
+	if debug {
+		fmt.Println("Starting decode, dst len:", len(dst))
+	}
+	var d, s, length int
+	offset := 0
+
+	// As long as we can read at least 5 bytes...
+	for s < len(src)-5 {
+		// Removing bounds checks is SLOWER when doing
+		//	in := src[s:s+5]
+		// instead. Checked on Go 1.18.
+		switch src[s] & 0x03 {
+		case tagLiteral:
+			x := uint32(src[s] >> 2)
+			switch {
+			case x < 60:
+				s++
+			case x == 60:
+				s += 2
+				x = uint32(src[s-1])
+			case x == 61:
+				in := src[s : s+3]
+				x = uint32(in[1]) | uint32(in[2])<<8
+				s += 3
+			case x == 62:
+				in := src[s : s+4]
+				// Load as 32 bit and shift down.
+				x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
+				x >>= 8
+				s += 4
+			case x == 63:
+				in := src[s : s+5]
+				x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24
+				s += 5
+			}
+			length = int(x) + 1
+			if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) {
+				if debug {
+					fmt.Println("corrupt: lit size", length)
+				}
+				return decodeErrCodeCorrupt
+			}
+			if debug {
+				fmt.Println("literals, length:", length, "d-after:", d+length)
+			}
+
+			copy(dst[d:], src[s:s+length])
+			d += length
+			s += length
+			continue
+
+		case tagCopy1:
+			s += 2
+			toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+			length = int(src[s-2]) >> 2 & 0x7
+			if toffset == 0 {
+				if debug {
+					fmt.Print("(repeat) ")
+				}
+				// keep last offset
+				switch length {
+				case 5:
+					length = int(src[s]) + 4
+					s += 1
+				case 6:
+					in := src[s : s+2]
+					length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8)
+					s += 2
+				case 7:
+					in := src[s : s+3]
+					length = int((uint32(in[2])<<16)|(uint32(in[1])<<8)|uint32(in[0])) + (1 << 16)
+					s += 3
+				default: // 0 -> 4: length fits in the tag; the common +4 is added below.
+				}
+			} else {
+				offset = toffset
+			}
+			length += 4
+		case tagCopy2:
+			in := src[s : s+3]
+			offset = int(uint32(in[1]) | uint32(in[2])<<8)
+			length = 1 + int(in[0])>>2
+			s += 3
+
+		case tagCopy4:
+			in := src[s : s+5]
+			offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24)
+			length = 1 + int(in[0])>>2
+			s += 5
+		}
+
+		if offset <= 0 || d < offset || length > len(dst)-d {
+			if debug {
+				fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d)
+			}
+
+			return decodeErrCodeCorrupt
+		}
+
+		if debug {
+			fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length)
+		}
+
+		// Copy from an earlier sub-slice of dst to a later sub-slice.
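+		// (For example, offset == 1 with length == 5 repeats the byte at
+		// dst[d-1] five times; that overlapping case is why the fallback
+		// below must copy forward one byte at a time.)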
+ // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + // Remaining with extra checks... + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length > len(dst)-d || length > len(src)-s || (strconv.IntSize == 32 && length <= 0) { + if debug { + fmt.Println("corrupt: lit size", length) + } + return decodeErrCodeCorrupt + } + if debug { + fmt.Println("literals, length:", length, "d-after:", d+length) + } + + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(src[s-2]) >> 2 & 0x7 + toffset := int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + if toffset == 0 { + if debug { + fmt.Print("(repeat) ") + } + // keep last offset + switch length { + case 5: + s += 1 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-1])) + 4 + case 6: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-2])|(uint32(src[s-1])<<8)) + (1 << 8) + case 7: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = int(uint32(src[s-3])|(uint32(src[s-2])<<8)|(uint32(src[s-1])<<16)) + (1 << 16) + default: // 0-> 4 + } + } else { + offset = toffset + } + length += 4 + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + if debug { + fmt.Println("corrupt: match, length", length, "offset:", offset, "dst avail:", len(dst)-d, "dst pos:", d) + } + return decodeErrCodeCorrupt + } + + if debug { + fmt.Println("copy, length:", length, "offset:", offset, "d-after:", d+length) + } + + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset > length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/s2/dict.go b/vendor/github.com/klauspost/compress/s2/dict.go new file mode 100644 index 00000000000..24f7ce80bc5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/dict.go @@ -0,0 +1,331 @@ +// Copyright (c) 2022+ Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "bytes" + "encoding/binary" + "sync" +) + +const ( + // MinDictSize is the minimum dictionary size when repeat has been read. + MinDictSize = 16 + + // MaxDictSize is the maximum dictionary size when repeat has been read. + MaxDictSize = 65536 + + // MaxDictSrcOffset is the maximum offset where a dictionary entry can start. + MaxDictSrcOffset = 65535 +) + +// Dict contains a dictionary that can be used for encoding and decoding s2 +type Dict struct { + dict []byte + repeat int // Repeat as index of dict + + fast, better, best sync.Once + fastTable *[1 << 14]uint16 + + betterTableShort *[1 << 14]uint16 + betterTableLong *[1 << 17]uint16 + + bestTableShort *[1 << 16]uint32 + bestTableLong *[1 << 19]uint32 +} + +// NewDict will read a dictionary. +// It will return nil if the dictionary is invalid. +func NewDict(dict []byte) *Dict { + if len(dict) == 0 { + return nil + } + var d Dict + // Repeat is the first value of the dict + r, n := binary.Uvarint(dict) + if n <= 0 { + return nil + } + dict = dict[n:] + d.dict = dict + if cap(d.dict) < len(d.dict)+16 { + d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...) + } + if len(dict) < MinDictSize || len(dict) > MaxDictSize { + return nil + } + d.repeat = int(r) + if d.repeat > len(dict) { + return nil + } + return &d +} + +// Bytes will return a serialized version of the dictionary. +// The output can be sent to NewDict. +func (d *Dict) Bytes() []byte { + dst := make([]byte, binary.MaxVarintLen16+len(d.dict)) + return append(dst[:binary.PutUvarint(dst, uint64(d.repeat))], d.dict...) +} + +// MakeDict will create a dictionary. +// 'data' must be at least MinDictSize. +// If data is longer than MaxDictSize only the last MaxDictSize bytes will be used. +// If searchStart is set the start repeat value will be set to the last +// match of this content. 
+// If no matches are found, it will attempt to find shorter matches. +// This content should match the typical start of a block. +// If at least 4 bytes cannot be matched, repeat is set to start of block. +func MakeDict(data []byte, searchStart []byte) *Dict { + if len(data) == 0 { + return nil + } + if len(data) > MaxDictSize { + data = data[len(data)-MaxDictSize:] + } + var d Dict + dict := data + d.dict = dict + if cap(d.dict) < len(d.dict)+16 { + d.dict = append(make([]byte, 0, len(d.dict)+16), d.dict...) + } + if len(dict) < MinDictSize { + return nil + } + + // Find the longest match possible, last entry if multiple. + for s := len(searchStart); s > 4; s-- { + if idx := bytes.LastIndex(data, searchStart[:s]); idx >= 0 && idx <= len(data)-8 { + d.repeat = idx + break + } + } + + return &d +} + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func (d *Dict) Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + dstP := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:dstP] + } + if len(src) < minNonLiteralBlockSize { + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] + } + n := encodeBlockDictGo(dst[dstP:], src, d) + if n > 0 { + dstP += n + return dst[:dstP] + } + // Not compressible + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] +} + +// EncodeBetter returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBetter compresses better than Encode but typically with a +// 10-40% speed decrease on both compression and decompression. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func (d *Dict) EncodeBetter(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. 
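+	// (Illustrative arithmetic: for a 1000-byte src the header is the two
+	// bytes 0xE8 0x07, since uvarint(1000) = [0xE8, 0x07].)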
+ dstP := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:dstP] + } + if len(src) < minNonLiteralBlockSize { + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] + } + n := encodeBlockBetterDict(dst[dstP:], src, d) + if n > 0 { + dstP += n + return dst[:dstP] + } + // Not compressible + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] +} + +// EncodeBest returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBest compresses as good as reasonably possible but with a +// big speed decrease. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func (d *Dict) EncodeBest(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + dstP := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:dstP] + } + if len(src) < minNonLiteralBlockSize { + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] + } + n := encodeBlockBest(dst[dstP:], src, d) + if n > 0 { + dstP += n + return dst[:dstP] + } + // Not compressible + dstP += emitLiteral(dst[dstP:], src) + return dst[:dstP] +} + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func (d *Dict) Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= cap(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + if s2DecodeDict(dst, src[s:], d) != 0 { + return nil, ErrCorrupt + } + return dst, nil +} + +func (d *Dict) initFast() { + d.fast.Do(func() { + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint16 + // We stop so any entry of length 8 can always be read. + for i := 0; i < len(d.dict)-8-2; i += 3 { + x0 := load64(d.dict, i) + h0 := hash6(x0, tableBits) + h1 := hash6(x0>>8, tableBits) + h2 := hash6(x0>>16, tableBits) + table[h0] = uint16(i) + table[h1] = uint16(i + 1) + table[h2] = uint16(i + 2) + } + d.fastTable = &table + }) +} + +func (d *Dict) initBetter() { + d.better.Do(func() { + const ( + // Long hash matches. + lTableBits = 17 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint16 + var sTable [maxSTableSize]uint16 + + // We stop so any entry of length 8 can always be read. + for i := 0; i < len(d.dict)-8; i++ { + cv := load64(d.dict, i) + lTable[hash7(cv, lTableBits)] = uint16(i) + sTable[hash4(cv, sTableBits)] = uint16(i) + } + d.betterTableShort = &sTable + d.betterTableLong = &lTable + }) +} + +func (d *Dict) initBest() { + d.best.Do(func() { + const ( + // Long hash matches. 
+ lTableBits = 19 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 16 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // We stop so any entry of length 8 can always be read. + for i := 0; i < len(d.dict)-8; i++ { + cv := load64(d.dict, i) + hashL := hash8(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL := lTable[hashL] + candidateS := sTable[hashS] + lTable[hashL] = uint32(i) | candidateL<<16 + sTable[hashS] = uint32(i) | candidateS<<16 + } + d.bestTableShort = &sTable + d.bestTableLong = &lTable + }) +} diff --git a/vendor/github.com/klauspost/compress/s2/encode.go b/vendor/github.com/klauspost/compress/s2/encode.go new file mode 100644 index 00000000000..e6c2310212f --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode.go @@ -0,0 +1,393 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "encoding/binary" + "math" + "math/bits" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlock(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EstimateBlockSize will perform a very fast compression +// without outputting the result and return the compressed output size. +// The function returns -1 if no improvement could be achieved. +// Using actual compression will most often produce better compression than the estimate. +func EstimateBlockSize(src []byte) (d int) { + if len(src) < 6 || int64(len(src)) > 0xffffffff { + return -1 + } + if len(src) <= 1024 { + d = calcBlockSizeSmall(src) + } else { + d = calcBlockSize(src) + } + + if d == 0 { + return -1 + } + // Size of the varint encoded block size. + d += (bits.Len64(uint64(len(src))) + 7) / 7 + + if d >= len(src) { + return -1 + } + return d +} + +// EncodeBetter returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBetter compresses better than Encode but typically with a +// 10-40% speed decrease on both compression and decompression. +// +// The dst and src must not overlap. It is valid to pass a nil dst. 
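+//
+// A hedged call sketch (illustrative; Decode is this package's block decoder):
+//
+//	compressed := EncodeBetter(nil, input)
+//	roundTrip, err := Decode(nil, compressed)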
+// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeBetter(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlockBetter(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeBest returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// EncodeBest compresses as good as reasonably possible but with a +// big speed decrease. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeBest(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlockBest(dst[d:], src, nil) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeSnappy returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The output is Snappy compatible and will likely decompress faster. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeSnappy(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. 
+ d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + + n := encodeBlockSnappy(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeSnappyBetter returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The output is Snappy compatible and will likely decompress faster. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeSnappyBetter(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + + n := encodeBlockBetterSnappy(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// EncodeSnappyBest returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The output is Snappy compatible and will likely decompress faster. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// The blocks will require the same amount of memory to decode as encoding, +// and does not make for concurrent decoding. +// Also note that blocks do not contain CRC information, so corruption may be undetected. +// +// If you need to encode larger amounts of data, consider using +// the streaming interface which gives all of these features. +func EncodeSnappyBest(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if cap(dst) < n { + dst = make([]byte, n) + } else { + dst = dst[:n] + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + + n := encodeBlockBestSnappy(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// ConcatBlocks will concatenate the supplied blocks and append them to the supplied destination. +// If the destination is nil or too small, a new will be allocated. +// The blocks are not validated, so garbage in = garbage out. +// dst may not overlap block data. +// Any data in dst is preserved as is, so it will not be considered a block. 
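+//
+// A hedged usage sketch (illustrative values):
+//
+//	a := Encode(nil, []byte("hello "))
+//	b := Encode(nil, []byte("world"))
+//	joined, err := ConcatBlocks(nil, a, b)
+//	// joined is a single block that decodes to "hello world".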
+func ConcatBlocks(dst []byte, blocks ...[]byte) ([]byte, error) { + totalSize := uint64(0) + compSize := 0 + for _, b := range blocks { + l, hdr, err := decodedLen(b) + if err != nil { + return nil, err + } + totalSize += uint64(l) + compSize += len(b) - hdr + } + if totalSize == 0 { + dst = append(dst, 0) + return dst, nil + } + if totalSize > math.MaxUint32 { + return nil, ErrTooLarge + } + var tmp [binary.MaxVarintLen32]byte + hdrSize := binary.PutUvarint(tmp[:], totalSize) + wantSize := hdrSize + compSize + + if cap(dst)-len(dst) < wantSize { + dst = append(make([]byte, 0, wantSize+len(dst)), dst...) + } + dst = append(dst, tmp[:hdrSize]...) + for _, b := range blocks { + _, hdr, err := decodedLen(b) + if err != nil { + return nil, err + } + dst = append(dst, b[hdr:]...) + } + return dst, nil +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 8 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// will be accepted by the encoder. +const minNonLiteralBlockSize = 32 + +const intReduction = 2 - (1 << (^uint(0) >> 63)) // 1 (32 bits) or 0 (64 bits) + +// MaxBlockSize is the maximum value where MaxEncodedLen will return a valid block size. +// Blocks this big are highly discouraged, though. +// Half the size on 32 bit systems. +const MaxBlockSize = (1<<(32-intReduction) - 1) - binary.MaxVarintLen32 - 5 + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +// 32 bit platforms will have lower thresholds for rejecting big content. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if intReduction == 1 { + // 32 bits + if n > math.MaxInt32 { + // Also includes negative. + return -1 + } + } else if n > 0xffffffff { + // 64 bits + // Also includes negative. + return -1 + } + // Size of the varint encoded block size. + n = n + uint64((bits.Len64(n)+7)/7) + + // Add maximum size of encoding block as literals. + n += uint64(literalExtraSize(int64(srcLen))) + if intReduction == 1 { + // 32 bits + if n > math.MaxInt32 { + return -1 + } + } else if n > 0xffffffff { + // 64 bits + // Also includes negative. + return -1 + } + return int(n) +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_all.go b/vendor/github.com/klauspost/compress/s2/encode_all.go new file mode 100644 index 00000000000..5e57995d483 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_all.go @@ -0,0 +1,1048 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package s2 + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/bits" +) + +func load32(b []byte, i int) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +func load64(b []byte, i int) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash6(u uint64, h uint8) uint32 { + const prime6bytes = 227718039650203 + return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63)) +} + +func encodeGo(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + if len(src) == 0 { + return dst[:d] + } + if len(src) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], src) + return dst[:d] + } + n := encodeBlockGo(dst[d:], src) + if n > 0 { + d += n + return dst[:d] + } + // Not compressible + d += emitLiteral(dst[d:], src) + return dst[:d] +} + +// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockGo(dst, src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + + debug = false + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. 
+ if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +func encodeBlockSnappyGo(dst, src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. 
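The inner loops above extend matches eight bytes at a time: XOR two little-endian 64-bit loads, and if the result is non-zero, `TrailingZeros64(diff)>>3` counts how many leading bytes still matched. A self-contained sketch of that idiom:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// extendMatch compares 8 bytes at a time. A zero XOR means all 8 bytes
// match; otherwise the trailing-zero count, divided by 8, gives the
// number of matching leading bytes (little-endian loads put the first
// byte in the low bits).
func extendMatch(src []byte, s, candidate int) int {
	n := 0
	for s+8 <= len(src) {
		a := binary.LittleEndian.Uint64(src[s:])
		b := binary.LittleEndian.Uint64(src[candidate:])
		if diff := a ^ b; diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		s += 8
		candidate += 8
		n += 8
	}
	// Tail: fall back to byte-wise comparison near the end of src.
	for s < len(src) && src[s] == src[candidate] {
		s++
		candidate++
		n++
	}
	return n
}

func main() {
	src := []byte("abcdefgh12abcdefgh34")
	fmt.Println(extendMatch(src, 10, 0)) // prints 8
}
```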
The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeat(dst[d:], repeat, s-base) + if false { + // Validate match. 
+ a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockDictGo(dst, src []byte, dict *Dict) (d int) { + // Initialize the hash table. + const ( + tableBits = 14 + maxTableSize = 1 << tableBits + maxAhead = 8 // maximum bytes ahead without checking sLimit + + debug = false + ) + dict.initFast() + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if sLimit > MaxDictSrcOffset-maxAhead { + sLimit = MaxDictSrcOffset - maxAhead + } + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form can start with a dict entry (copy or repeat). + s := 0 + + // Convert dict repeat to offset + repeat := len(dict.dict) - dict.repeat + cv := load64(src, 0) + + // While in dict +searchDict: + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + if nextS > sLimit { + if debug { + fmt.Println("slimit reached", s, nextS) + } + break searchDict + } + candidateDict := int(dict.fastTable[hash0]) + candidateDict2 := int(dict.fastTable[hash1]) + candidate2 := int(table[hash1]) + candidate := int(table[hash0]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. 
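Throughout the dictionary encoder, `repeat = len(dict.dict) - dict.repeat` treats the dictionary and `src` as one continuous buffer, with offsets that can reach back past the start of `src`. A toy illustration of that addressing model (the `resolve` helper is hypothetical, not part of the package):

```go
package main

import "fmt"

// resolve maps a position in the virtual buffer [dict | src] back to
// the buffer that actually holds it. Matches found in the dictionary
// use offsets that reach back before the start of src, which is the
// model behind "repeat = len(dict.dict) - dict.repeat" above.
func resolve(dict, src []byte, pos int) (buf []byte, i int) {
	if pos < len(dict) {
		return dict, pos
	}
	return src, pos - len(dict)
}

func main() {
	dict := []byte("0123456789")
	src := []byte("abcdef")
	for _, p := range []int{3, 12} {
		buf, i := resolve(dict, src, p)
		fmt.Printf("virtual position %d -> byte %q\n", p, buf[i])
	}
}
```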
+ const checkRep = 1 + + if repeat > s { + candidate := len(dict.dict) - repeat + s + if repeat-s >= 4 && uint32(cv) == load32(dict.dict, candidate) { + // Extend back + base := s + for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + s += 4 + candidate += 4 + for candidate < len(dict.dict)-8 && s <= len(src)-8 { + if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + d += emitRepeat(dst[d:], repeat, s-base) + if debug { + fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + cv = load64(src, s) + continue + } + } else if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + + nextEmit = s + if s >= sLimit { + break searchDict + } + if debug { + fmt.Println("emitted reg repeat", s-base, "s:", s) + } + cv = load64(src, s) + continue searchDict + } + if s == 0 { + cv = load64(src, nextS) + s = nextS + continue searchDict + } + // Start with table. These matches will always be closer. + if uint32(cv) == load32(src, candidate) { + goto emitMatch + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + goto emitMatch + } + + // Check dict. Dicts have longer offsets, so we want longer matches. + if cv == load64(dict.dict, candidateDict) { + table[hash2] = uint32(s + 2) + goto emitDict + } + + candidateDict = int(dict.fastTable[hash2]) + // Check if upper 7 bytes match + if candidateDict2 >= 1 { + if cv^load64(dict.dict, candidateDict2-1) < (1 << 8) { + table[hash2] = uint32(s + 2) + candidateDict = candidateDict2 + s++ + goto emitDict + } + } + + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + goto emitMatch + } + if candidateDict >= 2 { + // Check if upper 6 bytes match + if cv^load64(dict.dict, candidateDict-2) < (1 << 16) { + s += 2 + goto emitDict + } + } + + cv = load64(src, nextS) + s = nextS + continue searchDict + + emitDict: + { + if debug { + if load32(dict.dict, candidateDict) != load32(src, s) { + panic("dict emit mismatch") + } + } + // Extend backwards. + // The top bytes will be rechecked to get the full match. 
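The `cv^load64(...) < (1 << 8)` tests above rely on a small bit trick: if two 64-bit values XOR to less than 256, they can differ only in the lowest byte, so the upper 7 bytes are equal. Demonstrated in isolation:

```go
package main

import "fmt"

// upper7Match: if a^b < 1<<8, the values can differ only in their
// lowest byte, so the upper 7 bytes are equal. With little-endian
// loads those are the later 7 bytes of the 8 loaded, which is what
// the encoder wants when probing a candidate stored one byte earlier.
func upper7Match(a, b uint64) bool {
	return a^b < 1<<8
}

func main() {
	fmt.Println(upper7Match(0x1122334455667708, 0x11223344556677ff)) // true
	fmt.Println(upper7Match(0x1122334455667708, 0x1122334455668808)) // false
}
```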
+ for candidateDict > 0 && s > nextEmit && dict.dict[candidateDict-1] == src[s-1] { + candidateDict-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = s + (len(dict.dict)) - candidateDict + + // Extend the 4-byte match as long as possible. + s += 4 + candidateDict += 4 + for s <= len(src)-8 && len(dict.dict)-candidateDict >= 8 { + if diff := load64(src, s) ^ load64(dict.dict, candidateDict); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateDict += 8 + } + + // Matches longer than 64 are split. + if s <= sLimit || s-base < 8 { + d += emitCopy(dst[d:], repeat, s-base) + } else { + // Split to ensure we don't start a copy within next block + d += emitCopy(dst[d:], repeat, 4) + d += emitRepeat(dst[d:], repeat, s-base-4) + } + if false { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := dict.dict[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if debug { + fmt.Println("emitted dict copy, length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index and continue loop to try new candidate. + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>8, tableBits) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s - 1) + cv = load64(src, s) + } + continue + } + emitMatch: + + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. 
+ if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if debug { + fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + + // Search without dict: + if repeat > s { + repeat = 0 + } + + // No more dict + sLimit = len(src) - inputMargin + if s >= sLimit { + goto emitRemainder + } + if debug { + fmt.Println("non-dict matching at", s, "repeat:", repeat) + } + cv = load64(src, s) + if debug { + fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s) + } + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if repeat > 0 && uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if nextEmit > 0 { + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + } else { + // First match, cannot be repeat. + d += emitCopy(dst[d:], repeat, s-base) + } + if debug { + fmt.Println("emitted src repeat length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. 
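The probe stride `nextS := s + (s-nextEmit)>>6 + 4` grows with the distance since the last emitted literal, so regions that refuse to match are skipped over increasingly fast. A sketch of how the stride scales at a few positions:

```go
package main

import "fmt"

// step mirrors the search acceleration above: the further we are past
// the last emitted literal without finding a match, the larger the
// stride, so incompressible regions are scanned quickly.
func step(s, nextEmit int) int {
	return s + (s-nextEmit)>>6 + 4
}

func main() {
	for _, s := range []int{1, 64, 256, 1024} {
		fmt.Printf("at s=%d the next probe is %d\n", s, step(s, 0))
	}
}
```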
+ if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopy(dst[d:], repeat, s-base) + if debug { + // Validate match. + if s <= candidate { + panic("s <= candidate") + } + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + if debug { + fmt.Println("emitted src copy, length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if debug && s == candidate { + panic("s == candidate") + } + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + if debug && nextEmit != s { + fmt.Println("emitted ", len(src)-nextEmit, "literals") + } + } + return d +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go new file mode 100644 index 00000000000..ebc332ad5ff --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go @@ -0,0 +1,148 @@ +//go:build !appengine && !noasm && gc +// +build !appengine,!noasm,gc + +package s2 + +const hasAmd64Asm = true + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + const ( + // Use 12 bit table when less than... + limit12B = 16 << 10 + // Use 10 bit table when less than... + limit10B = 4 << 10 + // Use 8 bit table when less than... 
+ limit8B = 512 + ) + + if len(src) >= 4<<20 { + return encodeBlockAsm(dst, src) + } + if len(src) >= limit12B { + return encodeBlockAsm4MB(dst, src) + } + if len(src) >= limit10B { + return encodeBlockAsm12B(dst, src) + } + if len(src) >= limit8B { + return encodeBlockAsm10B(dst, src) + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + return encodeBlockAsm8B(dst, src) +} + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetter(dst, src []byte) (d int) { + const ( + // Use 12 bit table when less than... + limit12B = 16 << 10 + // Use 10 bit table when less than... + limit10B = 4 << 10 + // Use 8 bit table when less than... + limit8B = 512 + ) + + if len(src) > 4<<20 { + return encodeBetterBlockAsm(dst, src) + } + if len(src) >= limit12B { + return encodeBetterBlockAsm4MB(dst, src) + } + if len(src) >= limit10B { + return encodeBetterBlockAsm12B(dst, src) + } + if len(src) >= limit8B { + return encodeBetterBlockAsm10B(dst, src) + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + return encodeBetterBlockAsm8B(dst, src) +} + +// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockSnappy(dst, src []byte) (d int) { + const ( + // Use 12 bit table when less than... + limit12B = 16 << 10 + // Use 10 bit table when less than... + limit10B = 4 << 10 + // Use 8 bit table when less than... + limit8B = 512 + ) + if len(src) >= 64<<10 { + return encodeSnappyBlockAsm(dst, src) + } + if len(src) >= limit12B { + return encodeSnappyBlockAsm64K(dst, src) + } + if len(src) >= limit10B { + return encodeSnappyBlockAsm12B(dst, src) + } + if len(src) >= limit8B { + return encodeSnappyBlockAsm10B(dst, src) + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + return encodeSnappyBlockAsm8B(dst, src) +} + +// encodeBlockSnappy encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterSnappy(dst, src []byte) (d int) { + const ( + // Use 12 bit table when less than... + limit12B = 16 << 10 + // Use 10 bit table when less than... + limit10B = 4 << 10 + // Use 8 bit table when less than... 
+ limit8B = 512 + ) + if len(src) >= 64<<10 { + return encodeSnappyBetterBlockAsm(dst, src) + } + if len(src) >= limit12B { + return encodeSnappyBetterBlockAsm64K(dst, src) + } + if len(src) >= limit10B { + return encodeSnappyBetterBlockAsm12B(dst, src) + } + if len(src) >= limit8B { + return encodeSnappyBetterBlockAsm10B(dst, src) + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + return encodeSnappyBetterBlockAsm8B(dst, src) +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_best.go b/vendor/github.com/klauspost/compress/s2/encode_best.go new file mode 100644 index 00000000000..1d13e869a11 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_best.go @@ -0,0 +1,793 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "fmt" + "math" + "math/bits" +) + +// encodeBlockBest encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBest(dst, src []byte, dict *Dict) (d int) { + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 19 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 16 + maxSTableSize = 1 << sTableBits + + inputMargin = 8 + 2 + + debug = false + ) + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + sLimitDict := len(src) - inputMargin + if sLimitDict > MaxDictSrcOffset-inputMargin { + sLimitDict = MaxDictSrcOffset - inputMargin + } + + var lTable [maxLTableSize]uint64 + var sTable [maxSTableSize]uint64 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + repeat := 1 + if dict != nil { + dict.initBest() + s = 0 + repeat = len(dict.dict) - dict.repeat + } + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + const lowbitMask = 0xffffffff + getCur := func(x uint64) int { + return int(x & lowbitMask) + } + getPrev := func(x uint64) int { + return int(x >> 32) + } + const maxSkip = 64 + + for { + type match struct { + offset int + s int + length int + score int + rep, dict bool + } + var best match + for { + // Next src position to check + nextS := (s-nextEmit)>>8 + 1 + if nextS > maxSkip { + nextS = s + maxSkip + } else { + nextS += s + } + if nextS > sLimit { + goto emitRemainder + } + if dict != nil && s >= MaxDictSrcOffset { + dict = nil + if repeat > s { + repeat = math.MinInt32 + } + } + hashL := hash8(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL := lTable[hashL] + candidateS := sTable[hashS] + + score := func(m match) int { + // Matches that are longer forward are penalized since we must emit it as a literal. 
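Note how each 64-bit table slot in the best encoder holds two candidates: `getCur` reads the low word, `getPrev` the high word, and the update `uint64(s) | old<<32` ages the previous entry into the high half. A sketch of that packing (the helper names are local to the example):

```go
package main

import "fmt"

// Each slot packs two candidate positions into one uint64: the newest
// in the low 32 bits, the previous one aged into the high 32 bits by
// the shift in the update. Only two generations are kept per bucket;
// the oldest falls off the top.
func main() {
	var slot uint64

	insert := func(pos uint32) {
		slot = uint64(pos) | slot<<32 // old low word becomes the high word
	}
	cur := func() int { return int(slot & 0xffffffff) }
	prev := func() int { return int(slot >> 32) }

	insert(100)
	insert(250)
	fmt.Println(cur(), prev()) // 250 100
}
```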
+ score := m.length - m.s + if nextEmit == m.s { + // If we do not have to emit literals, we save 1 byte + score++ + } + offset := m.s - m.offset + if m.rep { + return score - emitRepeatSize(offset, m.length) + } + return score - emitCopySize(offset, m.length) + } + + matchAt := func(offset, s int, first uint32, rep bool) match { + if best.length != 0 && best.s-best.offset == s-offset { + // Don't retest if we have the same offset. + return match{offset: offset, s: s} + } + if load32(src, offset) != first { + return match{offset: offset, s: s} + } + m := match{offset: offset, s: s, length: 4 + offset, rep: rep} + s += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[m.length] { + m.length++ + s++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + m.length -= offset + m.score = score(m) + if m.score <= -m.s { + // Eliminate if no savings, we might find a better one. + m.length = 0 + } + return m + } + matchDict := func(candidate, s int, first uint32, rep bool) match { + // Calculate offset as if in continuous array with s + offset := -len(dict.dict) + candidate + if best.length != 0 && best.s-best.offset == s-offset && !rep { + // Don't retest if we have the same offset. + return match{offset: offset, s: s} + } + + if load32(dict.dict, candidate) != first { + return match{offset: offset, s: s} + } + m := match{offset: offset, s: s, length: 4 + candidate, rep: rep, dict: true} + s += 4 + if !rep { + for s < sLimitDict && m.length < len(dict.dict) { + if len(src)-s < 8 || len(dict.dict)-m.length < 8 { + if src[s] == dict.dict[m.length] { + m.length++ + s++ + continue + } + break + } + if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + } else { + for s < len(src) && m.length < len(dict.dict) { + if len(src)-s < 8 || len(dict.dict)-m.length < 8 { + if src[s] == dict.dict[m.length] { + m.length++ + s++ + continue + } + break + } + if diff := load64(src, s) ^ load64(dict.dict, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + } + m.length -= candidate + m.score = score(m) + if m.score <= -m.s { + // Eliminate if no savings, we might find a better one. 
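The `score` closure above charges a candidate for the literals in front of it and for the bytes its copy encoding will cost; `matchAt` then drops candidates whose score cannot beat doing nothing. A toy version of that accounting (`copyCost` stands in for `emitCopySize`/`emitRepeatSize`, which are not reimplemented here):

```go
package main

import "fmt"

// score: a match is worth its length, minus how far forward it starts
// (those bytes must be emitted as literals), minus the bytes needed to
// encode the copy itself.
func score(length, start, nextEmit, copyCost int) int {
	s := length - start
	if nextEmit == start {
		s++ // no pending literals to flush first saves a byte
	}
	return s - copyCost
}

func main() {
	// A longer match that starts later can still lose to a shorter,
	// closer one once the extra literals are charged against it.
	fmt.Println(score(20, 5, 0, 3)) // 12
	fmt.Println(score(16, 0, 0, 3)) // 14
}
```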
+ m.length = 0 + } + return m + } + + bestOf := func(a, b match) match { + if b.length == 0 { + return a + } + if a.length == 0 { + return b + } + as := a.score + b.s + bs := b.score + a.s + if as >= bs { + return a + } + return b + } + + if s > 0 { + best = bestOf(matchAt(getCur(candidateL), s, uint32(cv), false), matchAt(getPrev(candidateL), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv), false)) + } + if dict != nil { + candidateL := dict.bestTableLong[hashL] + candidateS := dict.bestTableShort[hashS] + best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateL>>16), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS>>16), s, uint32(cv), false)) + } + { + if (dict == nil || repeat <= s) && repeat > 0 { + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8), true)) + } else if s-repeat < -4 && dict != nil { + candidate := len(dict.dict) - (repeat - s) + best = bestOf(best, matchDict(candidate, s, uint32(cv), true)) + candidate++ + best = bestOf(best, matchDict(candidate, s+1, uint32(cv>>8), true)) + } + + if best.length > 0 { + hashS := hash4(cv>>8, sTableBits) + // s+1 + nextShort := sTable[hashS] + s := s + 1 + cv := load64(src, s) + hashL := hash8(cv, lTableBits) + nextLong := lTable[hashL] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) + + // Dict at + 1 + if dict != nil { + candidateL := dict.bestTableLong[hashL] + candidateS := dict.bestTableShort[hashS] + + best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) + } + + // s+2 + if true { + hashS := hash4(cv>>8, sTableBits) + + nextShort = sTable[hashS] + s++ + cv = load64(src, s) + hashL := hash8(cv, lTableBits) + nextLong = lTable[hashL] + + if (dict == nil || repeat <= s) && repeat > 0 { + // Repeat at + 2 + best = bestOf(best, matchAt(s-repeat, s, uint32(cv), true)) + } else if repeat-s > 4 && dict != nil { + candidate := len(dict.dict) - (repeat - s) + best = bestOf(best, matchDict(candidate, s, uint32(cv), true)) + } + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv), false)) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv), false)) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv), false)) + + // Dict at +2 + // Very small gain + if dict != nil { + candidateL := dict.bestTableLong[hashL] + candidateS := dict.bestTableShort[hashS] + + best = bestOf(best, matchDict(int(candidateL&0xffff), s, uint32(cv), false)) + best = bestOf(best, matchDict(int(candidateS&0xffff), s, uint32(cv), false)) + } + } + // Search for a match at best match end, see if that is better. + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 1-2 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. 
+ const skipBeginning = 2 + const skipEnd = 1 + if sAt := best.s + best.length - skipEnd; sAt < sLimit { + + sBack := best.s + skipBeginning - skipEnd + backL := best.length - skipBeginning + // Load initial values + cv = load64(src, sBack) + + // Grab candidates... + next := lTable[hash8(load64(src, sAt), lTableBits)] + + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + // Disabled: Extremely small gain + if false { + next = sTable[hash4(load64(src, sAt), sTableBits)] + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv), false)) + } + } + } + } + } + + // Update table + lTable[hashL] = uint64(s) | candidateL<<32 + sTable[hashS] = uint64(s) | candidateS<<32 + + if best.length > 0 { + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards, not needed for repeats... + s = best.s + if !best.rep && !best.dict { + for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] { + best.offset-- + best.length++ + s-- + } + } + if false && best.offset >= s { + panic(fmt.Errorf("t %d >= s %d", best.offset, s)) + } + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := s - best.offset + s += best.length + + if offset > 65535 && s-base <= 5 && !best.rep { + // Bail if the match is equal or worse to the encoding. + s = best.s + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + if debug && nextEmit != base { + fmt.Println("EMIT", base-nextEmit, "literals. base-after:", base) + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if best.rep { + if nextEmit > 0 || best.dict { + if debug { + fmt.Println("REPEAT, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) + } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], offset, best.length) + } else { + // First match without dict cannot be a repeat. + if debug { + fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) + } + d += emitCopy(dst[d:], offset, best.length) + } + } else { + if debug { + fmt.Println("COPY, length", best.length, "offset:", offset, "s-after:", s, "dict:", best.dict, "best:", best) + } + d += emitCopy(dst[d:], offset, best.length) + } + repeat = offset + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Fill tables... + for i := best.s + 1; i < s; i++ { + cv0 := load64(src, i) + long0 := hash8(cv0, lTableBits) + short0 := hash4(cv0, sTableBits) + lTable[long0] = uint64(i) | lTable[long0]<<32 + sTable[short0] = uint64(i) | sTable[short0]<<32 + } + cv = load64(src, s) + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + if debug && nextEmit != s { + fmt.Println("emitted ", len(src)-nextEmit, "literals") + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBestSnappy encodes a non-empty src to a guaranteed-large-enough dst. 
It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBestSnappy(dst, src []byte) (d int) { + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 19 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 16 + maxSTableSize = 1 << sTableBits + + inputMargin = 8 + 2 + ) + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + var lTable [maxLTableSize]uint64 + var sTable [maxSTableSize]uint64 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + const lowbitMask = 0xffffffff + getCur := func(x uint64) int { + return int(x & lowbitMask) + } + getPrev := func(x uint64) int { + return int(x >> 32) + } + const maxSkip = 64 + + for { + type match struct { + offset int + s int + length int + score int + } + var best match + for { + // Next src position to check + nextS := (s-nextEmit)>>8 + 1 + if nextS > maxSkip { + nextS = s + maxSkip + } else { + nextS += s + } + if nextS > sLimit { + goto emitRemainder + } + hashL := hash8(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL := lTable[hashL] + candidateS := sTable[hashS] + + score := func(m match) int { + // Matches that are longer forward are penalized since we must emit it as a literal. + score := m.length - m.s + if nextEmit == m.s { + // If we do not have to emit literals, we save 1 byte + score++ + } + offset := m.s - m.offset + + return score - emitCopyNoRepeatSize(offset, m.length) + } + + matchAt := func(offset, s int, first uint32) match { + if best.length != 0 && best.s-best.offset == s-offset { + // Don't retest if we have the same offset. + return match{offset: offset, s: s} + } + if load32(src, offset) != first { + return match{offset: offset, s: s} + } + m := match{offset: offset, s: s, length: 4 + offset} + s += 4 + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, m.length); diff != 0 { + m.length += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + m.length += 8 + } + m.length -= offset + m.score = score(m) + if m.score <= -m.s { + // Eliminate if no savings, we might find a better one. 
+ m.length = 0 + } + return m + } + + bestOf := func(a, b match) match { + if b.length == 0 { + return a + } + if a.length == 0 { + return b + } + as := a.score + b.s + bs := b.score + a.s + if as >= bs { + return a + } + return b + } + + best = bestOf(matchAt(getCur(candidateL), s, uint32(cv)), matchAt(getPrev(candidateL), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(candidateS), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(candidateS), s, uint32(cv))) + + { + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8))) + if best.length > 0 { + // s+1 + nextShort := sTable[hash4(cv>>8, sTableBits)] + s := s + 1 + cv := load64(src, s) + nextLong := lTable[hash8(cv, lTableBits)] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv))) + // Repeat at + 2 + best = bestOf(best, matchAt(s-repeat+1, s+1, uint32(cv>>8))) + + // s+2 + if true { + nextShort = sTable[hash4(cv>>8, sTableBits)] + s++ + cv = load64(src, s) + nextLong = lTable[hash8(cv, lTableBits)] + best = bestOf(best, matchAt(getCur(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextShort), s, uint32(cv))) + best = bestOf(best, matchAt(getCur(nextLong), s, uint32(cv))) + best = bestOf(best, matchAt(getPrev(nextLong), s, uint32(cv))) + } + // Search for a match at best match end, see if that is better. + if sAt := best.s + best.length; sAt < sLimit { + sBack := best.s + backL := best.length + // Load initial values + cv = load64(src, sBack) + // Search for mismatch + next := lTable[hash8(load64(src, sAt), lTableBits)] + //next := sTable[hash4(load64(src, sAt), sTableBits)] + + if checkAt := getCur(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv))) + } + if checkAt := getPrev(next) - backL; checkAt > 0 { + best = bestOf(best, matchAt(checkAt, sBack, uint32(cv))) + } + } + } + } + + // Update table + lTable[hashL] = uint64(s) | candidateL<<32 + sTable[hashS] = uint64(s) | candidateS<<32 + + if best.length > 0 { + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards, not needed for repeats... + s = best.s + if true { + for best.offset > 0 && s > nextEmit && src[best.offset-1] == src[s-1] { + best.offset-- + best.length++ + s-- + } + } + if false && best.offset >= s { + panic(fmt.Errorf("t %d >= s %d", best.offset, s)) + } + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := s - best.offset + + s += best.length + + if offset > 65535 && s-base <= 5 { + // Bail if the match is equal or worse to the encoding. + s = best.s + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + d += emitCopyNoRepeat(dst[d:], offset, best.length) + repeat = offset + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Fill tables... + for i := best.s + 1; i < s; i++ { + cv0 := load64(src, i) + long0 := hash8(cv0, lTableBits) + short0 := hash4(cv0, sTableBits) + lTable[long0] = uint64(i) | lTable[long0]<<32 + sTable[short0] = uint64(i) | sTable[short0]<<32 + } + cv = load64(src, s) + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. 
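The "extend backwards" loops above pull a match's start to the left while the preceding bytes also agree, since those bytes are cheaper inside the copy than as literals. The same logic in isolation:

```go
package main

import "fmt"

// extendBack widens a found match toward the start of the buffer, as in
// the "Extend backwards" loops above: while the bytes just before both
// positions agree, pull the match start left and grow its length.
func extendBack(src []byte, offset, s, length, nextEmit int) (int, int, int) {
	for offset > 0 && s > nextEmit && src[offset-1] == src[s-1] {
		offset--
		s--
		length++
	}
	return offset, s, length
}

func main() {
	src := []byte("xabcdexabcde")
	// A 4-byte match "bcde" was found at s=8 against position 2.
	off, s, l := extendBack(src, 2, 8, 4, 0)
	fmt.Println(off, s, l) // 0 6 6: the match grew to "xabcde"
}
```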
+ if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// emitCopySize returns the size to encode the offset+length +// +// It assumes that: +// +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopySize(offset, length int) int { + if offset >= 65536 { + i := 0 + if length > 64 { + length -= 64 + if length >= 4 { + // Emit remaining as repeats + return 5 + emitRepeatSize(offset, length) + } + i = 5 + } + if length == 0 { + return i + } + return i + 5 + } + + // Offset no more than 2 bytes. + if length > 64 { + if offset < 2048 { + // Emit 8 bytes, then rest as repeats... + return 2 + emitRepeatSize(offset, length-8) + } + // Emit remaining as repeats, at least 4 bytes remain. + return 3 + emitRepeatSize(offset, length-60) + } + if length >= 12 || offset >= 2048 { + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + return 2 +} + +// emitCopyNoRepeatSize returns the size to encode the offset+length +// +// It assumes that: +// +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopyNoRepeatSize(offset, length int) int { + if offset >= 65536 { + return 5 + 5*(length/64) + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit remaining as repeats, at least 4 bytes remain. + return 3 + 3*(length/60) + } + if length >= 12 || offset >= 2048 { + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + return 2 +} + +// emitRepeatSize returns the number of bytes required to encode a repeat. +// Length must be at least 4 and < 1<<24 +func emitRepeatSize(offset, length int) int { + // Repeat offset, make length cheaper + if length <= 4+4 || (length < 8+4 && offset < 2048) { + return 2 + } + if length < (1<<8)+4+4 { + return 3 + } + if length < (1<<16)+(1<<8)+4 { + return 4 + } + const maxRepeat = (1 << 24) - 1 + length -= (1 << 16) - 4 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + } + if left > 0 { + return 5 + emitRepeatSize(offset, left) + } + return 5 +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_better.go b/vendor/github.com/klauspost/compress/s2/encode_better.go new file mode 100644 index 00000000000..544cb1e17b9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_better.go @@ -0,0 +1,1106 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "bytes" + "fmt" + "math/bits" +) + +// hash4 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4(u uint64, h uint8) uint32 { + const prime4bytes = 2654435761 + return (uint32(u) * prime4bytes) >> ((32 - h) & 31) +} + +// hash5 returns the hash of the lowest 5 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash5(u uint64, h uint8) uint32 { + const prime5bytes = 889523592379 + return uint32(((u << (64 - 40)) * prime5bytes) >> ((64 - h) & 63)) +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. 
+func hash7(u uint64, h uint8) uint32 { + const prime7bytes = 58295818150454627 + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63)) +} + +// hash8 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash8(u uint64, h uint8) uint32 { + const prime8bytes = 0xcf1bbcdcb7a56463 + return uint32((u * prime8bytes) >> ((64 - h) & 63)) +} + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterGo(dst, src []byte) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 17 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := 0 + + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>7 + 1 + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if cv == valLong { + break + } + if cv == valShort { + candidateL = candidateS + break + } + + // Check repeat at offset checkRep. + const checkRep = 1 + // Minimum length of a repeat. Tested with various values. + // While 4-5 offers improvements in some, 6 reduces + // regressions significantly. + const wantRepeatBytes = 6 + const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep) + if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + wantRepeatBytes + checkRep + s += wantRepeatBytes + checkRep + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidate] { + s++ + candidate++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. 
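`encodeBlockBetterGo` keeps two tables: a 17-bit table keyed on `hash7` (selective, tends to find long matches) and a 14-bit table keyed on `hash4` (catches short matches the long hash misses), and it prefers the long candidate. A map-based toy of that lookup order, with hypothetical names in place of the fixed-size arrays the real encoder uses:

```go
package main

import "fmt"

// tables is a toy stand-in for the encoder's lTable/sTable pair.
type tables struct {
	long  map[uint64]int // 7-byte key -> position
	short map[uint32]int // 4-byte key -> position
}

// candidate prefers the long-hash hit: it has fewer false positives
// and usually implies a longer match. The short table is the fallback.
func (t tables) candidate(key7 uint64, key4 uint32) (pos int, ok bool) {
	if p, hit := t.long[key7]; hit {
		return p, true
	}
	p, hit := t.short[key4]
	return p, hit
}

func main() {
	t := tables{long: map[uint64]int{}, short: map[uint32]int{}}
	t.short[0xdeadbeef] = 42
	pos, ok := t.candidate(0x01020304050607, 0xdeadbeef)
	fmt.Println(pos, ok) // 42 true: fell back to the short table
}
```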
+ d += emitRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 2 + index1 -= 2 + } + + cv = load64(src, s) + continue + } + + // Long likely matches 7, so take that. + if uint32(cv) == uint32(valLong) { + break + } + + // Check our short candidate + if uint32(cv) == uint32(valShort) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if repeat == offset { + d += emitRepeat(dst[d:], offset, s-base) + } else { + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + // lTable could be postponed, but very minor difference. + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) + index0 += 2 + index2 += 2 + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBetterSnappyGo encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. 
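After a repeat, the encoder back-fills its tables for positions inside the match, walking inward from both ends two positions at a time so the work stays proportional to half the match length. The visiting order, extracted as a sketch:

```go
package main

import "fmt"

// Two cursors start just inside the match ends and converge, each
// skipping every other position, mirroring the index0/index1 loops.
func main() {
	base, s := 10, 30
	index0, index1 := base+1, s-2
	for index0 < index1 {
		fmt.Println("index positions", index0, "and", index1)
		index0 += 2
		index1 -= 2
	}
}
```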
+// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterSnappyGo(dst, src []byte) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + if len(src) < minNonLiteralBlockSize { + return 0 + } + + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 16 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + ) + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := 0 + const maxSkip = 100 + + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = (s-nextEmit)>>7 + 1 + if nextS > maxSkip { + nextS = s + maxSkip + } else { + nextS += s + } + + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + if uint32(cv) == load32(src, candidateL) { + break + } + + // Check our short candidate + if uint32(cv) == load32(src, candidateS) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + d += emitCopyNoRepeat(dst[d:], offset, s-base) + repeat = offset + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. 
+ return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) + index0 += 2 + index2 += 2 + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} + +// encodeBlockBetterDict encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlockBetterDict(dst, src []byte, dict *Dict) (d int) { + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + // Initialize the hash tables. + const ( + // Long hash matches. + lTableBits = 17 + maxLTableSize = 1 << lTableBits + + // Short hash matches. + sTableBits = 14 + maxSTableSize = 1 << sTableBits + + maxAhead = 8 // maximum bytes ahead without checking sLimit + + debug = false + ) + + sLimit := len(src) - inputMargin + if sLimit > MaxDictSrcOffset-maxAhead { + sLimit = MaxDictSrcOffset - maxAhead + } + if len(src) < minNonLiteralBlockSize { + return 0 + } + + dict.initBetter() + + var lTable [maxLTableSize]uint32 + var sTable [maxSTableSize]uint32 + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 0 + cv := load64(src, s) + + // We initialize repeat to 0, so we never match on first attempt + repeat := len(dict.dict) - dict.repeat + + // While in dict +searchDict: + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>7 + 1 + if nextS > sLimit { + break searchDict + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + dictL := int(dict.betterTableLong[hashL]) + dictS := int(dict.betterTableShort[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if s != 0 { + if cv == valLong { + goto emitMatch + } + if cv == valShort { + candidateL = candidateS + goto emitMatch + } + } + + // Check dict repeat. 
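+			// While still inside the shared dictionary, `repeat` is measured
+			// from the dictionary's end, so the candidate below maps the
+			// current position into dict space as len(dict.dict)-repeat+s;
+			// the repeat >= s+4 guard keeps at least four comparable bytes
+			// inside the dictionary.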
+ if repeat >= s+4 { + candidate := len(dict.dict) - repeat + s + if candidate > 0 && uint32(cv) == load32(dict.dict, candidate) { + // Extend back + base := s + for i := candidate; base > nextEmit && i > 0 && dict.dict[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != base { + fmt.Println("emitted ", base-nextEmit, "literals") + } + s += 4 + candidate += 4 + for candidate < len(dict.dict)-8 && s <= len(src)-8 { + if diff := load64(src, s) ^ load64(dict.dict, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + d += emitRepeat(dst[d:], repeat, s-base) + if debug { + fmt.Println("emitted dict repeat length", s-base, "offset:", repeat, "s:", s) + } + nextEmit = s + if s >= sLimit { + break searchDict + } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + cv = load64(src, s) + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 2 + index1 -= 2 + } + continue + } + } + // Don't try to find match at s==0 + if s == 0 { + cv = load64(src, nextS) + s = nextS + continue + } + + // Long likely matches 7, so take that. + if uint32(cv) == uint32(valLong) { + goto emitMatch + } + + // Long dict... + if uint32(cv) == load32(dict.dict, dictL) { + candidateL = dictL + goto emitDict + } + + // Check our short candidate + if uint32(cv) == uint32(valShort) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + goto emitMatch + } + // Use our short candidate. + candidateL = candidateS + goto emitMatch + } + if uint32(cv) == load32(dict.dict, dictS) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + goto emitMatch + } + candidateL = dictS + goto emitDict + } + cv = load64(src, nextS) + s = nextS + } + emitDict: + { + if debug { + if load32(dict.dict, candidateL) != load32(src, s) { + panic("dict emit mismatch") + } + } + // Extend backwards. + // The top bytes will be rechecked to get the full match. + for candidateL > 0 && s > nextEmit && dict.dict[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteral(dst[d:], src[nextEmit:s]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + offset := s + (len(dict.dict)) - candidateL + + // Extend the 4-byte match as long as possible. 
+ s += 4 + candidateL += 4 + for s <= len(src)-8 && len(dict.dict)-candidateL >= 8 { + if diff := load64(src, s) ^ load64(dict.dict, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if repeat == offset { + if debug { + fmt.Println("emitted dict repeat, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL) + } + d += emitRepeat(dst[d:], offset, s-base) + } else { + if debug { + fmt.Println("emitted dict copy, length", s-base, "offset:", offset, "s:", s, "dict offset:", candidateL) + } + // Matches longer than 64 are split. + if s <= sLimit || s-base < 8 { + d += emitCopy(dst[d:], offset, s-base) + } else { + // Split to ensure we don't start a copy within next block. + d += emitCopy(dst[d:], offset, 4) + d += emitRepeat(dst[d:], offset, s-base-4) + } + repeat = offset + } + if false { + // Validate match. + if s <= candidateL { + panic("s <= candidate") + } + a := src[base:s] + b := dict.dict[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + break searchDict + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // index every second long in between. + for index0 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index1), lTableBits)] = uint32(index1) + index0 += 2 + index1 -= 2 + } + } + continue + } + emitMatch: + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if debug && nextEmit != s { + fmt.Println("emitted ", s-nextEmit, "literals") + } + if repeat == offset { + if debug { + fmt.Println("emitted match repeat, length", s-base, "offset:", offset, "s:", s) + } + d += emitRepeat(dst[d:], offset, s-base) + } else { + if debug { + fmt.Println("emitted match copy, length", s-base, "offset:", offset, "s:", s) + } + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. 
+ return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) + index0 += 2 + index2 += 2 + } + } + + // Search without dict: + if repeat > s { + repeat = 0 + } + + // No more dict + sLimit = len(src) - inputMargin + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + if debug { + fmt.Println("now", s, "->", sLimit, "out:", d, "left:", len(src)-s, "nextemit:", nextEmit, "dstLimit:", dstLimit, "s:", s) + } + for { + candidateL := 0 + nextS := 0 + for { + // Next src position to check + nextS = s + (s-nextEmit)>>7 + 1 + if nextS > sLimit { + goto emitRemainder + } + hashL := hash7(cv, lTableBits) + hashS := hash4(cv, sTableBits) + candidateL = int(lTable[hashL]) + candidateS := int(sTable[hashS]) + lTable[hashL] = uint32(s) + sTable[hashS] = uint32(s) + + valLong := load64(src, candidateL) + valShort := load64(src, candidateS) + + // If long matches at least 8 bytes, use that. + if cv == valLong { + break + } + if cv == valShort { + candidateL = candidateS + break + } + + // Check repeat at offset checkRep. + const checkRep = 1 + // Minimum length of a repeat. Tested with various values. + // While 4-5 offers improvements in some, 6 reduces + // regressions significantly. + const wantRepeatBytes = 6 + const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep) + if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteral(dst[d:], src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + wantRepeatBytes + checkRep + s += wantRepeatBytes + checkRep + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidate] { + s++ + candidate++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + // same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset. + d += emitRepeat(dst[d:], repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + // Index in-between + index0 := base + 1 + index1 := s - 2 + + for index0 < index1 { + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 2 + index1 -= 2 + } + + cv = load64(src, s) + continue + } + + // Long likely matches 7, so take that. 
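+			// (hash7 keys on seven bytes, so when the low four bytes of cv
+			// agree with the long candidate, the remaining three usually
+			// agree as well, making it the better pick than the short one.)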
+ if uint32(cv) == uint32(valLong) { + break + } + + // Check our short candidate + if uint32(cv) == uint32(valShort) { + // Try a long candidate at s+1 + hashL = hash7(cv>>8, lTableBits) + candidateL = int(lTable[hashL]) + lTable[hashL] = uint32(s + 1) + if uint32(cv>>8) == load32(src, candidateL) { + s++ + break + } + // Use our short candidate. + candidateL = candidateS + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] { + candidateL-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + base := s + offset := base - candidateL + + // Extend the 4-byte match as long as possible. + s += 4 + candidateL += 4 + for s < len(src) { + if len(src)-s < 8 { + if src[s] == src[candidateL] { + s++ + candidateL++ + continue + } + break + } + if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidateL += 8 + } + + if offset > 65535 && s-base <= 5 && repeat != offset { + // Bail if the match is equal or worse to the encoding. + s = nextS + 1 + if s >= sLimit { + goto emitRemainder + } + cv = load64(src, s) + continue + } + + d += emitLiteral(dst[d:], src[nextEmit:base]) + if repeat == offset { + d += emitRepeat(dst[d:], offset, s-base) + } else { + d += emitCopy(dst[d:], offset, s-base) + repeat = offset + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + + // Index short & long + index0 := base + 1 + index1 := s - 2 + + cv0 := load64(src, index0) + cv1 := load64(src, index1) + lTable[hash7(cv0, lTableBits)] = uint32(index0) + sTable[hash4(cv0>>8, sTableBits)] = uint32(index0 + 1) + + lTable[hash7(cv1, lTableBits)] = uint32(index1) + sTable[hash4(cv1>>8, sTableBits)] = uint32(index1 + 1) + index0 += 1 + index1 -= 1 + cv = load64(src, s) + + // Index large values sparsely in between. + // We do two starting from different offsets for speed. + index2 := (index0 + index1 + 1) >> 1 + for index2 < index1 { + lTable[hash7(load64(src, index0), lTableBits)] = uint32(index0) + lTable[hash7(load64(src, index2), lTableBits)] = uint32(index2) + index0 += 2 + index2 += 2 + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/s2/encode_go.go b/vendor/github.com/klauspost/compress/s2/encode_go.go new file mode 100644 index 00000000000..0d39c7b0e01 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encode_go.go @@ -0,0 +1,727 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package s2 + +import ( + "bytes" + "math/bits" +) + +const hasAmd64Asm = false + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) +func encodeBlock(dst, src []byte) (d int) { + if len(src) < minNonLiteralBlockSize { + return 0 + } + return encodeBlockGo(dst, src) +} + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. 
+// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) +func encodeBlockBetter(dst, src []byte) (d int) { + return encodeBlockBetterGo(dst, src) +} + +// encodeBlockBetter encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) +func encodeBlockBetterSnappy(dst, src []byte) (d int) { + return encodeBlockBetterSnappyGo(dst, src) +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) +func encodeBlockSnappy(dst, src []byte) (d int) { + if len(src) < minNonLiteralBlockSize { + return 0 + } + return encodeBlockSnappyGo(dst, src) +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +func emitLiteral(dst, lit []byte) int { + if len(lit) == 0 { + return 0 + } + const num = 63<<2 | tagLiteral + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[1] = uint8(n) + dst[0] = 60<<2 | tagLiteral + i = 2 + case n < 1<<16: + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 61<<2 | tagLiteral + i = 3 + case n < 1<<24: + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 62<<2 | tagLiteral + i = 4 + default: + dst[4] = uint8(n >> 24) + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 63<<2 | tagLiteral + i = 5 + } + return i + copy(dst[i:], lit) +} + +// emitRepeat writes a repeat chunk and returns the number of bytes written. +// Length must be at least 4 and < 1<<24 +func emitRepeat(dst []byte, offset, length int) int { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + return 2 + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + return 2 + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + return 3 + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + return 4 + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + return 5 + emitRepeat(dst[5:], offset, left) + } + return 5 +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopy(dst []byte, offset, length int) int { + if offset >= 65536 { + i := 0 + if length > 64 { + // Emit a length 64 copy, encoded as 5 bytes. 
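+		// With the usual Snappy tag values (tagCopy4 == 3) the first byte
+		// written below is 63<<2|3 == 0xff, followed by the 32-bit offset;
+		// the same 0xff-plus-offset pattern appears in the generated
+		// assembler further down.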
+ dst[4] = uint8(offset >> 24) + dst[3] = uint8(offset >> 16) + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 63<<2 | tagCopy4 + length -= 64 + if length >= 4 { + // Emit remaining as repeats + return 5 + emitRepeat(dst[5:], offset, length) + } + i = 5 + } + if length == 0 { + return i + } + // Emit a copy, offset encoded as 4 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy4 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + dst[i+3] = uint8(offset >> 16) + dst[i+4] = uint8(offset >> 24) + return i + 5 + } + + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + return off + emitRepeat(dst[off:], offset, length) + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + return 2 +} + +// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +func emitCopyNoRepeat(dst []byte, offset, length int) int { + if offset >= 65536 { + i := 0 + if length > 64 { + // Emit a length 64 copy, encoded as 5 bytes. + dst[4] = uint8(offset >> 24) + dst[3] = uint8(offset >> 16) + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 63<<2 | tagCopy4 + length -= 64 + if length >= 4 { + // Emit remaining as repeats + return 5 + emitCopyNoRepeat(dst[5:], offset, length) + } + i = 5 + } + if length == 0 { + return i + } + // Emit a copy, offset encoded as 4 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy4 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + dst[i+3] = uint8(offset >> 16) + dst[i+4] = uint8(offset >> 24) + return i + 5 + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + // Emit remaining as repeats, at least 4 bytes remain. + return 3 + emitCopyNoRepeat(dst[3:], offset, length) + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + return 2 +} + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) +func matchLen(a []byte, b []byte) int { + b = b[:len(a)] + var checked int + if len(a) > 4 { + // Try 4 bytes first + if diff := load32(a, 0) ^ load32(b, 0); diff != 0 { + return bits.TrailingZeros32(diff) >> 3 + } + // Switch to 8 byte matching. 
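+		// The 8-byte step relies on XOR plus trailing-zero counting; a
+		// minimal sketch of the trick, assuming little-endian 64-bit loads:
+		//
+		//	if diff := load64(a, i) ^ load64(b, i); diff != 0 {
+		//		n := bits.TrailingZeros64(diff) >> 3 // equal low bytes
+		//	}
+		//
+		// Every zero low byte of the XOR is one more matching byte.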
+ checked = 4 + a = a[4:] + b = b[4:] + for len(a) >= 8 { + b = b[:len(a)] + if diff := load64(a, 0) ^ load64(b, 0); diff != 0 { + return checked + (bits.TrailingZeros64(diff) >> 3) + } + checked += 8 + a = a[8:] + b = b[8:] + } + } + b = b[:len(a)] + for i := range a { + if a[i] != b[i] { + return int(i) + checked + } + } + return len(a) + checked +} + +func calcBlockSize(src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 13 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. + const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteralSize(src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteralSize(src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. 
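+		// At the bottom of the loop below, positions s-2 and s are hashed
+		// after each counted copy; if the table already holds a 4-byte match
+		// at s, another copy follows with no literals in between, otherwise
+		// scanning resumes at s+1.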
+ for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + if false { + // Validate match. + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteralSize(src[nextEmit:]) + } + return d +} + +func calcBlockSizeSmall(src []byte) (d int) { + // Initialize the hash table. + const ( + tableBits = 9 + maxTableSize = 1 << tableBits + ) + + var table [maxTableSize]uint32 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // Bail if we can't compress to at least this. + dstLimit := len(src) - len(src)>>5 - 5 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + cv := load64(src, s) + + // We search for a repeat at -1, but don't output repeats when nextEmit == 0 + repeat := 1 + + for { + candidate := 0 + for { + // Next src position to check + nextS := s + (s-nextEmit)>>6 + 4 + if nextS > sLimit { + goto emitRemainder + } + hash0 := hash6(cv, tableBits) + hash1 := hash6(cv>>8, tableBits) + candidate = int(table[hash0]) + candidate2 := int(table[hash1]) + table[hash0] = uint32(s) + table[hash1] = uint32(s + 1) + hash2 := hash6(cv>>16, tableBits) + + // Check repeat at offset checkRep. 
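+			// The repeat probe compares four bytes one position ahead of s
+			// (checkRep == 1); a hit is extended backwards afterwards, so
+			// the skipped byte is recovered before the literals are counted.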
+ const checkRep = 1 + if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) { + base := s + checkRep + // Extend back + for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; { + i-- + base-- + } + d += emitLiteralSize(src[nextEmit:base]) + + // Extend forward + candidate := s - repeat + 4 + checkRep + s += 4 + checkRep + for s <= sLimit { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + cv = load64(src, s) + continue + } + + if uint32(cv) == load32(src, candidate) { + break + } + candidate = int(table[hash2]) + if uint32(cv>>8) == load32(src, candidate2) { + table[hash2] = uint32(s + 2) + candidate = candidate2 + s++ + break + } + table[hash2] = uint32(s + 2) + if uint32(cv>>16) == load32(src, candidate) { + s += 2 + break + } + + cv = load64(src, nextS) + s = nextS + } + + // Extend backwards + for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] { + candidate-- + s-- + } + + // Bail if we exceed the maximum size. + if d+(s-nextEmit) > dstLimit { + return 0 + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + d += emitLiteralSize(src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + repeat = base - candidate + + // Extend the 4-byte match as long as possible. + s += 4 + candidate += 4 + for s <= len(src)-8 { + if diff := load64(src, s) ^ load64(src, candidate); diff != 0 { + s += bits.TrailingZeros64(diff) >> 3 + break + } + s += 8 + candidate += 8 + } + + d += emitCopyNoRepeatSize(repeat, s-base) + if false { + // Validate match. + a := src[base:s] + b := src[base-repeat : base-repeat+(s-base)] + if !bytes.Equal(a, b) { + panic("mismatch") + } + } + + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if d > dstLimit { + // Do we have space for more, if not bail. + return 0 + } + // Check for an immediate match, otherwise start search at s+1 + x := load64(src, s-2) + m2Hash := hash6(x, tableBits) + currHash := hash6(x>>16, tableBits) + candidate = int(table[currHash]) + table[m2Hash] = uint32(s - 2) + table[currHash] = uint32(s) + if uint32(x>>16) != load32(src, candidate) { + cv = load64(src, s+1) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + // Bail if we exceed the maximum size. + if d+len(src)-nextEmit > dstLimit { + return 0 + } + d += emitLiteralSize(src[nextEmit:]) + } + return d +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
+// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +func emitLiteralSize(lit []byte) int { + if len(lit) == 0 { + return 0 + } + switch { + case len(lit) <= 60: + return len(lit) + 1 + case len(lit) <= 1<<8: + return len(lit) + 2 + case len(lit) <= 1<<16: + return len(lit) + 3 + case len(lit) <= 1<<24: + return len(lit) + 4 + default: + return len(lit) + 5 + } +} + +func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { + panic("cvtLZ4BlockAsm should be unreachable") +} + +func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { + panic("cvtLZ4BlockSnappyAsm should be unreachable") +} + +func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { + panic("cvtLZ4sBlockAsm should be unreachable") +} + +func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { + panic("cvtLZ4sBlockSnappyAsm should be unreachable") +} diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go new file mode 100644 index 00000000000..297e41501ba --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go @@ -0,0 +1,228 @@ +// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +package s2 + +func _dummy_() + +// encodeBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm(dst []byte, src []byte) int + +// encodeBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4194304 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm4MB(dst []byte, src []byte) int + +// encodeBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm12B(dst []byte, src []byte) int + +// encodeBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm10B(dst []byte, src []byte) int + +// encodeBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBlockAsm8B(dst []byte, src []byte) int + +// encodeBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm(dst []byte, src []byte) int + +// encodeBetterBlockAsm4MB encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4194304 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. 
+// +//go:noescape +func encodeBetterBlockAsm4MB(dst []byte, src []byte) int + +// encodeBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm12B(dst []byte, src []byte) int + +// encodeBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm10B(dst []byte, src []byte) int + +// encodeBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeBetterBlockAsm8B(dst []byte, src []byte) int + +// encodeSnappyBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm(dst []byte, src []byte) int + +// encodeSnappyBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 65535 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm64K(dst []byte, src []byte) int + +// encodeSnappyBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm12B(dst []byte, src []byte) int + +// encodeSnappyBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm10B(dst []byte, src []byte) int + +// encodeSnappyBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBlockAsm8B(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm64K encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 65535 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm12B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 16383 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm10B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4095 bytes. 
+// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int + +// encodeSnappyBetterBlockAsm8B encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 511 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int + +// calcBlockSize encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 4294967295 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func calcBlockSize(src []byte) int + +// calcBlockSizeSmall encodes a non-empty src to a guaranteed-large-enough dst. +// Maximum input 1024 bytes. +// It assumes that the varint-encoded length of the decompressed bytes has already been written. +// +//go:noescape +func calcBlockSizeSmall(src []byte) int + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes with margin of 0 bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +// +//go:noescape +func emitLiteral(dst []byte, lit []byte) int + +// emitRepeat writes a repeat chunk and returns the number of bytes written. +// Length must be at least 4 and < 1<<32 +// +//go:noescape +func emitRepeat(dst []byte, offset int, length int) int + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +// +//go:noescape +func emitCopy(dst []byte, offset int, length int) int + +// emitCopyNoRepeat writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint32 +// 4 <= length && length <= 1 << 24 +// +//go:noescape +func emitCopyNoRepeat(dst []byte, offset int, length int) int + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) +// +//go:noescape +func matchLen(a []byte, b []byte) int + +// cvtLZ4Block converts an LZ4 block to S2 +// +//go:noescape +func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) + +// cvtLZ4sBlock converts an LZ4s block to S2 +// +//go:noescape +func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) + +// cvtLZ4Block converts an LZ4 block to Snappy +// +//go:noescape +func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) + +// cvtLZ4sBlock converts an LZ4s block to Snappy +// +//go:noescape +func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s new file mode 100644 index 00000000000..54031aa3133 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s @@ -0,0 +1,20399 @@ +// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT. 
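+// The routines below inline the emit* helpers declared in the stubs above.
+// As a worked example of the two-byte copy form (assuming the usual Snappy
+// tag values): offset 100 with length 8 encodes as
+// (100>>8)<<5 | (8-4)<<2 | tagCopy1 = 0x11, followed by the low offset
+// byte 0x64; the SHLL/ORL/MOVB sequences in the two_byte_offset_short
+// paths below compute exactly this shape.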
+ +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func _dummy_() +TEXT ·_dummy_(SB), $0 +#ifdef GOAMD64_v4 +#ifndef GOAMD64_v3 +#define GOAMD64_v3 +#endif +#endif + RET + +// func encodeBlockAsm(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm(SB), $65560-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x06, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeBlockAsm + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + SHLQ $0x10, R10 + IMULQ R8, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeBlockAsm + LEAL 1(CX), SI + MOVL 12(SP), DI + MOVL SI, BX + SUBL 16(SP), BX + JZ repeat_extend_back_end_encodeBlockAsm + +repeat_extend_back_loop_encodeBlockAsm: + CMPL SI, DI + JBE repeat_extend_back_end_encodeBlockAsm + MOVB -1(DX)(BX*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeBlockAsm + LEAL -1(SI), SI + DECL BX + JNZ repeat_extend_back_loop_encodeBlockAsm + +repeat_extend_back_end_encodeBlockAsm: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeBlockAsm + CMPL BX, $0x00010000 + JB three_bytes_repeat_emit_encodeBlockAsm + CMPL BX, $0x01000000 + JB four_bytes_repeat_emit_encodeBlockAsm + MOVB $0xfc, (AX) + MOVL BX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_repeat_emit_encodeBlockAsm + +four_bytes_repeat_emit_encodeBlockAsm: + MOVL BX, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_repeat_emit_encodeBlockAsm + +three_bytes_repeat_emit_encodeBlockAsm: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeBlockAsm + +two_bytes_repeat_emit_encodeBlockAsm: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeBlockAsm + JMP memmove_long_repeat_emit_encodeBlockAsm + +one_byte_repeat_emit_encodeBlockAsm: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32 + JMP 
emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm + +emit_lit_memmove_repeat_emit_encodeBlockAsm_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm + +memmove_long_repeat_emit_encodeBlockAsm: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeBlockAsm: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R11, R11 + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm + +matchlen_loopback_repeat_extend_encodeBlockAsm: + MOVQ (R9)(R11*1), R10 + XORQ (BX)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_repeat_extend_encodeBlockAsm + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeBlockAsm + +matchlen_loop_repeat_extend_encodeBlockAsm: + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 + JAE matchlen_loopback_repeat_extend_encodeBlockAsm + +matchlen_match4_repeat_extend_encodeBlockAsm: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm + MOVL (R9)(R11*1), R10 + CMPL (BX)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeBlockAsm + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeBlockAsm: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm + JB repeat_extend_forward_end_encodeBlockAsm + MOVW (R9)(R11*1), R10 + CMPW (BX)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeBlockAsm + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_encodeBlockAsm + +matchlen_match1_repeat_extend_encodeBlockAsm: + MOVB (R9)(R11*1), R10 + CMPB 
(BX)(R11*1), R10 + JNE repeat_extend_forward_end_encodeBlockAsm + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeBlockAsm: + ADDL R11, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + TESTL DI, DI + JZ repeat_as_copy_encodeBlockAsm + + // emitRepeat +emit_repeat_again_match_repeat_encodeBlockAsm: + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm + CMPL SI, $0x00000800 + JB repeat_two_offset_match_repeat_encodeBlockAsm + +cant_repeat_two_offset_match_repeat_encodeBlockAsm: + CMPL BX, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm + CMPL BX, $0x00010100 + JB repeat_four_match_repeat_encodeBlockAsm + CMPL BX, $0x0100ffff + JB repeat_five_match_repeat_encodeBlockAsm + LEAL -16842747(BX), BX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_repeat_encodeBlockAsm + +repeat_five_match_repeat_encodeBlockAsm: + LEAL -65536(BX), BX + MOVL BX, SI + MOVW $0x001d, (AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_match_repeat_encodeBlockAsm: + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_match_repeat_encodeBlockAsm: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_match_repeat_encodeBlockAsm: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_match_repeat_encodeBlockAsm: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_as_copy_encodeBlockAsm: + // emitCopy + CMPL SI, $0x00010000 + JB two_byte_offset_repeat_as_copy_encodeBlockAsm + CMPL BX, $0x40 + JBE four_bytes_remain_repeat_as_copy_encodeBlockAsm + MOVB $0xff, (AX) + MOVL SI, 1(AX) + LEAL -64(BX), BX + ADDQ $0x05, AX + CMPL BX, $0x04 + JB four_bytes_remain_repeat_as_copy_encodeBlockAsm + + // emitRepeat +emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy: + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL SI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL BX, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy + CMPL BX, $0x0100ffff + JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy + LEAL -16842747(BX), BX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy + +repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -65536(BX), BX + MOVL BX, SI + MOVW $0x001d, (AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm + 
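+// (MOVW $0x0015 in the repeat_three blocks writes the little-endian byte
+// pair 0x15, 0x00, i.e. dst[0] = 5<<2|tagCopy1 and dst[1] = 0, matching the
+// three-byte emitRepeat form in the Go reference; the length byte is then
+// stored with MOVB BL, 2(AX).)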
+repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +four_bytes_remain_repeat_as_copy_encodeBlockAsm: + TESTL BX, BX + JZ repeat_end_emit_encodeBlockAsm + XORL DI, DI + LEAL -1(DI)(BX*4), BX + MOVB BL, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm + +two_byte_offset_repeat_as_copy_encodeBlockAsm: + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm + CMPL SI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(AX) + MOVL SI, R8 + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, DI + MOVB DI, (AX) + ADDQ $0x02, AX + SUBL $0x08, BX + + // emitRepeat + LEAL -4(BX), BX + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + +emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + CMPL SI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + CMPL BX, $0x00010100 + JB repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + CMPL BX, $0x0100ffff + JB repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + LEAL -16842747(BX), BX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b + +repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + LEAL -65536(BX), BX + MOVL BX, SI + MOVW $0x001d, (AX) + MOVW BX, 2(AX) + SARL $0x10, SI + MOVB SI, 4(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short_2b: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm + +long_offset_short_repeat_as_copy_encodeBlockAsm: + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short: + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short + CMPL SI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short: + CMPL 
+	JB   repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short
+	CMPL BX, $0x00010100
+	JB   repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short
+	CMPL BX, $0x0100ffff
+	JB   repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short
+	LEAL -16842747(BX), BX
+	MOVL $0xfffb001d, (AX)
+	MOVB $0xff, 4(AX)
+	ADDQ $0x05, AX
+	JMP  emit_repeat_again_repeat_as_copy_encodeBlockAsm_emit_copy_short
+
+repeat_five_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+	LEAL -65536(BX), BX
+	MOVL BX, SI
+	MOVW $0x001d, (AX)
+	MOVW BX, 2(AX)
+	SARL $0x10, SI
+	MOVB SI, 4(AX)
+	ADDQ $0x05, AX
+	JMP  repeat_end_emit_encodeBlockAsm
+
+repeat_four_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+	LEAL -256(BX), BX
+	MOVW $0x0019, (AX)
+	MOVW BX, 2(AX)
+	ADDQ $0x04, AX
+	JMP  repeat_end_emit_encodeBlockAsm
+
+repeat_three_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+	LEAL -4(BX), BX
+	MOVW $0x0015, (AX)
+	MOVB BL, 2(AX)
+	ADDQ $0x03, AX
+	JMP  repeat_end_emit_encodeBlockAsm
+
+repeat_two_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+	SHLL $0x02, BX
+	ORL  $0x01, BX
+	MOVW BX, (AX)
+	ADDQ $0x02, AX
+	JMP  repeat_end_emit_encodeBlockAsm
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm_emit_copy_short:
+	XORQ DI, DI
+	LEAL 1(DI)(BX*4), BX
+	MOVB SI, 1(AX)
+	SARL $0x08, SI
+	SHLL $0x05, SI
+	ORL  SI, BX
+	MOVB BL, (AX)
+	ADDQ $0x02, AX
+	JMP  repeat_end_emit_encodeBlockAsm
+
+two_byte_offset_short_repeat_as_copy_encodeBlockAsm:
+	MOVL BX, DI
+	SHLL $0x02, DI
+	CMPL BX, $0x0c
+	JAE  emit_copy_three_repeat_as_copy_encodeBlockAsm
+	CMPL SI, $0x00000800
+	JAE  emit_copy_three_repeat_as_copy_encodeBlockAsm
+	LEAL -15(DI), DI
+	MOVB SI, 1(AX)
+	SHRL $0x08, SI
+	SHLL $0x05, SI
+	ORL  SI, DI
+	MOVB DI, (AX)
+	ADDQ $0x02, AX
+	JMP  repeat_end_emit_encodeBlockAsm
+
+emit_copy_three_repeat_as_copy_encodeBlockAsm:
+	LEAL -2(DI), DI
+	MOVB DI, (AX)
+	MOVW SI, 1(AX)
+	ADDQ $0x03, AX
+
+repeat_end_emit_encodeBlockAsm:
+	MOVL CX, 12(SP)
+	JMP  search_loop_encodeBlockAsm
+
+no_repeat_found_encodeBlockAsm:
+	CMPL (DX)(BX*1), SI
+	JEQ  candidate_match_encodeBlockAsm
+	SHRQ $0x08, SI
+	MOVL 24(SP)(R9*4), BX
+	LEAL 2(CX), R8
+	CMPL (DX)(DI*1), SI
+	JEQ  candidate2_match_encodeBlockAsm
+	MOVL R8, 24(SP)(R9*4)
+	SHRQ $0x08, SI
+	CMPL (DX)(BX*1), SI
+	JEQ  candidate3_match_encodeBlockAsm
+	MOVL 20(SP), CX
+	JMP  search_loop_encodeBlockAsm
+
+candidate3_match_encodeBlockAsm:
+	ADDL $0x02, CX
+	JMP  candidate_match_encodeBlockAsm
+
+candidate2_match_encodeBlockAsm:
+	MOVL R8, 24(SP)(R9*4)
+	INCL CX
+	MOVL DI, BX
+
+candidate_match_encodeBlockAsm:
+	MOVL  12(SP), SI
+	TESTL BX, BX
+	JZ    match_extend_back_end_encodeBlockAsm
+
+match_extend_back_loop_encodeBlockAsm:
+	CMPL CX, SI
+	JBE  match_extend_back_end_encodeBlockAsm
+	MOVB -1(DX)(BX*1), DI
+	MOVB -1(DX)(CX*1), R8
+	CMPB DI, R8
+	JNE  match_extend_back_end_encodeBlockAsm
+	LEAL -1(CX), CX
+	DECL BX
+	JZ   match_extend_back_end_encodeBlockAsm
+	JMP  match_extend_back_loop_encodeBlockAsm
+
+match_extend_back_end_encodeBlockAsm:
+	MOVL CX, SI
+	SUBL 12(SP), SI
+	LEAQ 5(AX)(SI*1), SI
+	CMPQ SI, (SP)
+	JB   match_dst_size_check_encodeBlockAsm
+	MOVQ $0x00000000, ret+48(FP)
+	RET
+
+match_dst_size_check_encodeBlockAsm:
+	MOVL CX, SI
+	MOVL 12(SP), DI
+	CMPL DI, SI
+	JEQ  emit_literal_done_match_emit_encodeBlockAsm
+	MOVL SI, R8
+	MOVL SI, 12(SP)
+	LEAQ (DX)(DI*1), SI
+	SUBL DI, R8
+	LEAL -1(R8), DI
+	CMPL DI, $0x3c
+	JB   one_byte_match_emit_encodeBlockAsm
+	CMPL DI, $0x00000100
+	JB   two_bytes_match_emit_encodeBlockAsm
+	CMPL DI, $0x00010000
+	JB   three_bytes_match_emit_encodeBlockAsm
+	CMPL DI, $0x01000000
+	JB   four_bytes_match_emit_encodeBlockAsm
+	MOVB $0xfc, (AX)
+	MOVL DI, 1(AX)
+	ADDQ $0x05, AX
+	JMP  memmove_long_match_emit_encodeBlockAsm
+
+four_bytes_match_emit_encodeBlockAsm:
+	MOVL DI, R9
+	SHRL $0x10, R9
+	MOVB $0xf8, (AX)
+	MOVW DI, 1(AX)
+	MOVB R9, 3(AX)
+	ADDQ $0x04, AX
+	JMP  memmove_long_match_emit_encodeBlockAsm
+
+three_bytes_match_emit_encodeBlockAsm:
+	MOVB $0xf4, (AX)
+	MOVW DI, 1(AX)
+	ADDQ $0x03, AX
+	JMP  memmove_long_match_emit_encodeBlockAsm
+
+two_bytes_match_emit_encodeBlockAsm:
+	MOVB $0xf0, (AX)
+	MOVB DI, 1(AX)
+	ADDQ $0x02, AX
+	CMPL DI, $0x40
+	JB   memmove_match_emit_encodeBlockAsm
+	JMP  memmove_long_match_emit_encodeBlockAsm
+
+one_byte_match_emit_encodeBlockAsm:
+	SHLB $0x02, DI
+	MOVB DI, (AX)
+	ADDQ $0x01, AX
+
+memmove_match_emit_encodeBlockAsm:
+	LEAQ (AX)(R8*1), DI
+
+	// genMemMoveShort
+	CMPQ R8, $0x08
+	JBE  emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8
+	CMPQ R8, $0x10
+	JBE  emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16
+	CMPQ R8, $0x20
+	JBE  emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32
+	JMP  emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8:
+	MOVQ (SI), R9
+	MOVQ R9, (AX)
+	JMP  memmove_end_copy_match_emit_encodeBlockAsm
+
+emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_8through16:
+	MOVQ (SI), R9
+	MOVQ -8(SI)(R8*1), SI
+	MOVQ R9, (AX)
+	MOVQ SI, -8(AX)(R8*1)
+	JMP  memmove_end_copy_match_emit_encodeBlockAsm
+
+emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_17through32:
+	MOVOU (SI), X0
+	MOVOU -16(SI)(R8*1), X1
+	MOVOU X0, (AX)
+	MOVOU X1, -16(AX)(R8*1)
+	JMP   memmove_end_copy_match_emit_encodeBlockAsm
+
+emit_lit_memmove_match_emit_encodeBlockAsm_memmove_move_33through64:
+	MOVOU (SI), X0
+	MOVOU 16(SI), X1
+	MOVOU -32(SI)(R8*1), X2
+	MOVOU -16(SI)(R8*1), X3
+	MOVOU X0, (AX)
+	MOVOU X1, 16(AX)
+	MOVOU X2, -32(AX)(R8*1)
+	MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBlockAsm:
+	MOVQ DI, AX
+	JMP  emit_literal_done_match_emit_encodeBlockAsm
+
+memmove_long_match_emit_encodeBlockAsm:
+	LEAQ (AX)(R8*1), DI
+
+	// genMemMoveLong
+	MOVOU (SI), X0
+	MOVOU 16(SI), X1
+	MOVOU -32(SI)(R8*1), X2
+	MOVOU -16(SI)(R8*1), X3
+	MOVQ  R8, R10
+	SHRQ  $0x05, R10
+	MOVQ  AX, R9
+	ANDL  $0x0000001f, R9
+	MOVQ  $0x00000040, R11
+	SUBQ  R9, R11
+	DECQ  R10
+	JA    emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32
+	LEAQ  -32(SI)(R11*1), R9
+	LEAQ  -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back:
+	MOVOU (R9), X4
+	MOVOU 16(R9), X5
+	MOVOA X4, (R12)
+	MOVOA X5, 16(R12)
+	ADDQ  $0x20, R12
+	ADDQ  $0x20, R9
+	ADDQ  $0x20, R11
+	DECQ  R10
+	JNA   emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32:
+	MOVOU -32(SI)(R11*1), X4
+	MOVOU -16(SI)(R11*1), X5
+	MOVOA X4, -32(AX)(R11*1)
+	MOVOA X5, -16(AX)(R11*1)
+	ADDQ  $0x20, R11
+	CMPQ  R8, R11
+	JAE   emit_lit_memmove_long_match_emit_encodeBlockAsmlarge_forward_sse_loop_32
+	MOVOU X0, (AX)
+	MOVOU X1, 16(AX)
+	MOVOU X2, -32(AX)(R8*1)
+	MOVOU X3, -16(AX)(R8*1)
+	MOVQ  DI, AX
+
+emit_literal_done_match_emit_encodeBlockAsm:
+match_nolit_loop_encodeBlockAsm:
+	MOVL CX, SI
+	SUBL BX, SI
+	MOVL SI, 16(SP)
+	ADDL $0x04, CX
+	ADDL $0x04, BX
+	MOVQ src_len+32(FP), SI
+	SUBL CX, SI
+	LEAQ (DX)(CX*1), DI
+	LEAQ (DX)(BX*1), BX
+
+	// matchLen
+	XORL R9, R9
+	CMPL SI, $0x08
+	JB   matchlen_match4_match_nolit_encodeBlockAsm
+
+matchlen_loopback_match_nolit_encodeBlockAsm:
+	MOVQ  (DI)(R9*1), R8
+	XORQ  (BX)(R9*1), R8
+	TESTQ R8, R8
+	JZ    matchlen_loop_match_nolit_encodeBlockAsm
+
+#ifdef GOAMD64_v3
+	TZCNTQ R8, R8
+
+#else
+	BSFQ R8, R8
+
+#endif
+	SARQ $0x03, R8
+	LEAL (R9)(R8*1), R9
+	JMP  match_nolit_end_encodeBlockAsm
+
+matchlen_loop_match_nolit_encodeBlockAsm:
+	LEAL -8(SI), SI
+	LEAL 8(R9), R9
+	CMPL SI, $0x08
+	JAE  matchlen_loopback_match_nolit_encodeBlockAsm
+
+matchlen_match4_match_nolit_encodeBlockAsm:
+	CMPL SI, $0x04
+	JB   matchlen_match2_match_nolit_encodeBlockAsm
+	MOVL (DI)(R9*1), R8
+	CMPL (BX)(R9*1), R8
+	JNE  matchlen_match2_match_nolit_encodeBlockAsm
+	LEAL -4(SI), SI
+	LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeBlockAsm:
+	CMPL SI, $0x01
+	JE   matchlen_match1_match_nolit_encodeBlockAsm
+	JB   match_nolit_end_encodeBlockAsm
+	MOVW (DI)(R9*1), R8
+	CMPW (BX)(R9*1), R8
+	JNE  matchlen_match1_match_nolit_encodeBlockAsm
+	LEAL 2(R9), R9
+	SUBL $0x02, SI
+	JZ   match_nolit_end_encodeBlockAsm
+
+matchlen_match1_match_nolit_encodeBlockAsm:
+	MOVB (DI)(R9*1), R8
+	CMPB (BX)(R9*1), R8
+	JNE  match_nolit_end_encodeBlockAsm
+	LEAL 1(R9), R9
+
+match_nolit_end_encodeBlockAsm:
+	ADDL R9, CX
+	MOVL 16(SP), BX
+	ADDL $0x04, R9
+	MOVL CX, 12(SP)
+
+	// emitCopy
+	CMPL BX, $0x00010000
+	JB   two_byte_offset_match_nolit_encodeBlockAsm
+	CMPL R9, $0x40
+	JBE  four_bytes_remain_match_nolit_encodeBlockAsm
+	MOVB $0xff, (AX)
+	MOVL BX, 1(AX)
+	LEAL -64(R9), R9
+	ADDQ $0x05, AX
+	CMPL R9, $0x04
+	JB   four_bytes_remain_match_nolit_encodeBlockAsm
+
+	// emitRepeat
+emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy:
+	MOVL R9, SI
+	LEAL -4(R9), R9
+	CMPL SI, $0x08
+	JBE  repeat_two_match_nolit_encodeBlockAsm_emit_copy
+	CMPL SI, $0x0c
+	JAE  cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy
+	CMPL BX, $0x00000800
+	JB   repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy:
+	CMPL R9, $0x00000104
+	JB   repeat_three_match_nolit_encodeBlockAsm_emit_copy
+	CMPL R9, $0x00010100
+	JB   repeat_four_match_nolit_encodeBlockAsm_emit_copy
+	CMPL R9, $0x0100ffff
+	JB   repeat_five_match_nolit_encodeBlockAsm_emit_copy
+	LEAL -16842747(R9), R9
+	MOVL $0xfffb001d, (AX)
+	MOVB $0xff, 4(AX)
+	ADDQ $0x05, AX
+	JMP  emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy
+
+repeat_five_match_nolit_encodeBlockAsm_emit_copy:
+	LEAL -65536(R9), R9
+	MOVL R9, BX
+	MOVW $0x001d, (AX)
+	MOVW R9, 2(AX)
+	SARL $0x10, BX
+	MOVB BL, 4(AX)
+	ADDQ $0x05, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_four_match_nolit_encodeBlockAsm_emit_copy:
+	LEAL -256(R9), R9
+	MOVW $0x0019, (AX)
+	MOVW R9, 2(AX)
+	ADDQ $0x04, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_three_match_nolit_encodeBlockAsm_emit_copy:
+	LEAL -4(R9), R9
+	MOVW $0x0015, (AX)
+	MOVB R9, 2(AX)
+	ADDQ $0x03, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_match_nolit_encodeBlockAsm_emit_copy:
+	SHLL $0x02, R9
+	ORL  $0x01, R9
+	MOVW R9, (AX)
+	ADDQ $0x02, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy:
+	XORQ SI, SI
+	LEAL 1(SI)(R9*4), R9
+	MOVB BL, 1(AX)
+	SARL $0x08, BX
+	SHLL $0x05, BX
+	ORL  BX, R9
+	MOVB R9, (AX)
+	ADDQ $0x02, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm
+
+four_bytes_remain_match_nolit_encodeBlockAsm:
+	TESTL R9, R9
+	JZ    match_nolit_emitcopy_end_encodeBlockAsm
+	XORL  SI, SI
+	LEAL  -1(SI)(R9*4), R9
+	MOVB  R9, (AX)
+	MOVL  BX, 1(AX)
+	ADDQ  $0x05, AX
+	JMP   match_nolit_emitcopy_end_encodeBlockAsm
+
+two_byte_offset_match_nolit_encodeBlockAsm:
+	CMPL R9, $0x40
+	JBE  two_byte_offset_short_match_nolit_encodeBlockAsm
+	CMPL BX, $0x00000800
+	JAE  long_offset_short_match_nolit_encodeBlockAsm
+	MOVL $0x00000001, SI
+	LEAL 16(SI), SI
+	MOVB BL, 1(AX)
+	MOVL BX, DI
+	SHRL $0x08, DI
+	SHLL $0x05, DI
+	ORL  DI, SI
+	MOVB SI, (AX)
+	ADDQ $0x02, AX
+	SUBL $0x08, R9
+
+	// emitRepeat
+	LEAL -4(R9), R9
+	JMP  cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
+
+emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+	MOVL R9, SI
+	LEAL -4(R9), R9
+	CMPL SI, $0x08
+	JBE  repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b
+	CMPL SI, $0x0c
+	JAE  cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
+	CMPL BX, $0x00000800
+	JB   repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+	CMPL R9, $0x00000104
+	JB   repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b
+	CMPL R9, $0x00010100
+	JB   repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b
+	CMPL R9, $0x0100ffff
+	JB   repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b
+	LEAL -16842747(R9), R9
+	MOVL $0xfffb001d, (AX)
+	MOVB $0xff, 4(AX)
+	ADDQ $0x05, AX
+	JMP  emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short_2b
+
+repeat_five_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+	LEAL -65536(R9), R9
+	MOVL R9, BX
+	MOVW $0x001d, (AX)
+	MOVW R9, 2(AX)
+	SARL $0x10, BX
+	MOVB BL, 4(AX)
+	ADDQ $0x05, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_four_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+	LEAL -256(R9), R9
+	MOVW $0x0019, (AX)
+	MOVW R9, 2(AX)
+	ADDQ $0x04, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_three_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+	LEAL -4(R9), R9
+	MOVW $0x0015, (AX)
+	MOVB R9, 2(AX)
+	ADDQ $0x03, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+	SHLL $0x02, R9
+	ORL  $0x01, R9
+	MOVW R9, (AX)
+	ADDQ $0x02, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short_2b:
+	XORQ SI, SI
+	LEAL 1(SI)(R9*4), R9
+	MOVB BL, 1(AX)
+	SARL $0x08, BX
+	SHLL $0x05, BX
+	ORL  BX, R9
+	MOVB R9, (AX)
+	ADDQ $0x02, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm
+
+long_offset_short_match_nolit_encodeBlockAsm:
+	MOVB $0xee, (AX)
+	MOVW BX, 1(AX)
+	LEAL -60(R9), R9
+	ADDQ $0x03, AX
+
+	// emitRepeat
+emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short:
+	MOVL R9, SI
+	LEAL -4(R9), R9
+	CMPL SI, $0x08
+	JBE  repeat_two_match_nolit_encodeBlockAsm_emit_copy_short
+	CMPL SI, $0x0c
+	JAE  cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short
+	CMPL BX, $0x00000800
+	JB   repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short:
+	CMPL R9, $0x00000104
+	JB   repeat_three_match_nolit_encodeBlockAsm_emit_copy_short
+	CMPL R9, $0x00010100
+	JB   repeat_four_match_nolit_encodeBlockAsm_emit_copy_short
+	CMPL R9, $0x0100ffff
+	JB   repeat_five_match_nolit_encodeBlockAsm_emit_copy_short
+	LEAL -16842747(R9), R9
+	MOVL $0xfffb001d, (AX)
+	MOVB $0xff, 4(AX)
+	ADDQ $0x05, AX
+	JMP  emit_repeat_again_match_nolit_encodeBlockAsm_emit_copy_short
+
+repeat_five_match_nolit_encodeBlockAsm_emit_copy_short:
+	LEAL -65536(R9), R9
+	MOVL R9, BX
+	MOVW $0x001d, (AX)
+	MOVW R9, 2(AX)
+	SARL $0x10, BX
+	MOVB BL, 4(AX)
+	ADDQ $0x05, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_four_match_nolit_encodeBlockAsm_emit_copy_short:
+	LEAL -256(R9), R9
+	MOVW $0x0019, (AX)
+	MOVW R9, 2(AX)
+	ADDQ $0x04, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_three_match_nolit_encodeBlockAsm_emit_copy_short:
+	LEAL -4(R9), R9
+	MOVW $0x0015, (AX)
+	MOVB R9, 2(AX)
+	ADDQ $0x03, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_match_nolit_encodeBlockAsm_emit_copy_short:
+	SHLL $0x02, R9
+	ORL  $0x01, R9
+	MOVW R9, (AX)
+	ADDQ $0x02, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm
+
+repeat_two_offset_match_nolit_encodeBlockAsm_emit_copy_short:
+	XORQ SI, SI
+	LEAL 1(SI)(R9*4), R9
+	MOVB BL, 1(AX)
+	SARL $0x08, BX
+	SHLL $0x05, BX
+	ORL  BX, R9
+	MOVB R9, (AX)
+	ADDQ $0x02, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm
+
+two_byte_offset_short_match_nolit_encodeBlockAsm:
+	MOVL R9, SI
+	SHLL $0x02, SI
+	CMPL R9, $0x0c
+	JAE  emit_copy_three_match_nolit_encodeBlockAsm
+	CMPL BX, $0x00000800
+	JAE  emit_copy_three_match_nolit_encodeBlockAsm
+	LEAL -15(SI), SI
+	MOVB BL, 1(AX)
+	SHRL $0x08, BX
+	SHLL $0x05, BX
+	ORL  BX, SI
+	MOVB SI, (AX)
+	ADDQ $0x02, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm
+
+emit_copy_three_match_nolit_encodeBlockAsm:
+	LEAL -2(SI), SI
+	MOVB SI, (AX)
+	MOVW BX, 1(AX)
+	ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeBlockAsm:
+	CMPL CX, 8(SP)
+	JAE  emit_remainder_encodeBlockAsm
+	MOVQ -2(DX)(CX*1), SI
+	CMPQ AX, (SP)
+	JB   match_nolit_dst_ok_encodeBlockAsm
+	MOVQ $0x00000000, ret+48(FP)
+	RET
+
+match_nolit_dst_ok_encodeBlockAsm:
+	MOVQ  $0x0000cf1bbcdcbf9b, R8
+	MOVQ  SI, DI
+	SHRQ  $0x10, SI
+	MOVQ  SI, BX
+	SHLQ  $0x10, DI
+	IMULQ R8, DI
+	SHRQ  $0x32, DI
+	SHLQ  $0x10, BX
+	IMULQ R8, BX
+	SHRQ  $0x32, BX
+	LEAL  -2(CX), R8
+	LEAQ  24(SP)(BX*4), R9
+	MOVL  (R9), BX
+	MOVL  R8, 24(SP)(DI*4)
+	MOVL  CX, (R9)
+	CMPL  (DX)(BX*1), SI
+	JEQ   match_nolit_loop_encodeBlockAsm
+	INCL  CX
+	JMP   search_loop_encodeBlockAsm
+
+emit_remainder_encodeBlockAsm:
+	MOVQ src_len+32(FP), CX
+	SUBL 12(SP), CX
+	LEAQ 5(AX)(CX*1), CX
+	CMPQ CX, (SP)
+	JB   emit_remainder_ok_encodeBlockAsm
+	MOVQ $0x00000000, ret+48(FP)
+	RET
+
+emit_remainder_ok_encodeBlockAsm:
+	MOVQ src_len+32(FP), CX
+	MOVL 12(SP), BX
+	CMPL BX, CX
+	JEQ  emit_literal_done_emit_remainder_encodeBlockAsm
+	MOVL CX, SI
+	MOVL CX, 12(SP)
+	LEAQ (DX)(BX*1), CX
+	SUBL BX, SI
+	LEAL -1(SI), DX
+	CMPL DX, $0x3c
+	JB   one_byte_emit_remainder_encodeBlockAsm
+	CMPL DX, $0x00000100
+	JB   two_bytes_emit_remainder_encodeBlockAsm
+	CMPL DX, $0x00010000
+	JB   three_bytes_emit_remainder_encodeBlockAsm
+	CMPL DX, $0x01000000
+	JB   four_bytes_emit_remainder_encodeBlockAsm
+	MOVB $0xfc, (AX)
+	MOVL DX, 1(AX)
+	ADDQ $0x05, AX
+	JMP  memmove_long_emit_remainder_encodeBlockAsm
+
+four_bytes_emit_remainder_encodeBlockAsm:
+	MOVL DX, BX
+	SHRL $0x10, BX
+	MOVB $0xf8, (AX)
+	MOVW DX, 1(AX)
+	MOVB BL, 3(AX)
+	ADDQ $0x04, AX
+	JMP  memmove_long_emit_remainder_encodeBlockAsm
+
+three_bytes_emit_remainder_encodeBlockAsm:
+	MOVB $0xf4, (AX)
+	MOVW DX, 1(AX)
+	ADDQ $0x03, AX
+	JMP  memmove_long_emit_remainder_encodeBlockAsm
+
+two_bytes_emit_remainder_encodeBlockAsm:
+	MOVB $0xf0, (AX)
+	MOVB DL, 1(AX)
+	ADDQ $0x02, AX
+	CMPL DX, $0x40
+	JB   memmove_emit_remainder_encodeBlockAsm
+	JMP  memmove_long_emit_remainder_encodeBlockAsm
+
+one_byte_emit_remainder_encodeBlockAsm:
+	SHLB $0x02, DL
+	MOVB DL, (AX)
+	ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBlockAsm:
+	LEAQ (AX)(SI*1), DX
+	MOVL SI, BX
+
+	// genMemMoveShort
+	CMPQ BX, $0x03
+	JB   emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2
+	JE   emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3
+	CMPQ BX, $0x08
+	JB   emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7
+	CMPQ BX, $0x10
+	JBE  emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16
+	CMPQ BX, $0x20
+	JBE  emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32
+	JMP  emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_1or2:
+	MOVB (CX), SI
+	MOVB -1(CX)(BX*1), CL
+	MOVB SI, (AX)
+	MOVB CL, -1(AX)(BX*1)
+	JMP  memmove_end_copy_emit_remainder_encodeBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_3:
+	MOVW (CX), SI
+	MOVB 2(CX), CL
+	MOVW SI, (AX)
+	MOVB CL, 2(AX)
+	JMP  memmove_end_copy_emit_remainder_encodeBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_4through7:
+	MOVL (CX), SI
+	MOVL -4(CX)(BX*1), CX
+	MOVL SI, (AX)
+	MOVL CX, -4(AX)(BX*1)
+	JMP  memmove_end_copy_emit_remainder_encodeBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_8through16:
+	MOVQ (CX), SI
+	MOVQ -8(CX)(BX*1), CX
+	MOVQ SI, (AX)
+	MOVQ CX, -8(AX)(BX*1)
+	JMP  memmove_end_copy_emit_remainder_encodeBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_17through32:
+	MOVOU (CX), X0
+	MOVOU -16(CX)(BX*1), X1
+	MOVOU X0, (AX)
+	MOVOU X1, -16(AX)(BX*1)
+	JMP   memmove_end_copy_emit_remainder_encodeBlockAsm
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm_memmove_move_33through64:
+	MOVOU (CX), X0
+	MOVOU 16(CX), X1
+	MOVOU -32(CX)(BX*1), X2
+	MOVOU -16(CX)(BX*1), X3
+	MOVOU X0, (AX)
+	MOVOU X1, 16(AX)
+	MOVOU X2, -32(AX)(BX*1)
+	MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBlockAsm:
+	MOVQ DX, AX
+	JMP  emit_literal_done_emit_remainder_encodeBlockAsm
+
+memmove_long_emit_remainder_encodeBlockAsm:
+	LEAQ (AX)(SI*1), DX
+	MOVL SI, BX
+
+	// genMemMoveLong
+	MOVOU (CX), X0
+	MOVOU 16(CX), X1
+	MOVOU -32(CX)(BX*1), X2
+	MOVOU -16(CX)(BX*1), X3
+	MOVQ  BX, DI
+	SHRQ  $0x05, DI
+	MOVQ  AX, SI
+	ANDL  $0x0000001f, SI
+	MOVQ  $0x00000040, R8
+	SUBQ  SI, R8
+	DECQ  DI
+	JA    emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32
+	LEAQ  -32(CX)(R8*1), SI
+	LEAQ  -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back:
+	MOVOU (SI), X4
+	MOVOU 16(SI), X5
+	MOVOA X4, (R9)
+	MOVOA X5, 16(R9)
+	ADDQ  $0x20, R9
+	ADDQ  $0x20, SI
+	ADDQ  $0x20, R8
+	DECQ  DI
+	JNA   emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32:
+	MOVOU -32(CX)(R8*1), X4
+	MOVOU -16(CX)(R8*1), X5
+	MOVOA X4, -32(AX)(R8*1)
+	MOVOA X5, -16(AX)(R8*1)
+	ADDQ  $0x20, R8
+	CMPQ  BX, R8
+	JAE   emit_lit_memmove_long_emit_remainder_encodeBlockAsmlarge_forward_sse_loop_32
+	MOVOU X0, (AX)
+	MOVOU X1, 16(AX)
+	MOVOU X2, -32(AX)(BX*1)
+	MOVOU X3, -16(AX)(BX*1)
+	MOVQ  DX, AX
+
+emit_literal_done_emit_remainder_encodeBlockAsm:
+	MOVQ dst_base+0(FP), CX
+	SUBQ CX, AX
+	MOVQ AX, ret+48(FP)
+	RET
+
+// func encodeBlockAsm4MB(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBlockAsm4MB(SB), $65560-56
+	MOVQ dst_base+0(FP), AX
+	MOVQ $0x00000200, CX
+	LEAQ 24(SP), DX
+	PXOR X0, X0
+
+zero_loop_encodeBlockAsm4MB:
+	MOVOU X0, (DX)
+	MOVOU X0, 16(DX)
+	MOVOU X0, 32(DX)
+	MOVOU X0, 48(DX)
+	MOVOU X0, 64(DX)
+	MOVOU X0, 80(DX)
+	MOVOU X0, 96(DX)
+	MOVOU X0, 112(DX)
+	ADDQ  $0x80, DX
+	DECQ  CX
+	JNZ   zero_loop_encodeBlockAsm4MB
+	MOVL  $0x00000000, 12(SP)
+	MOVQ  src_len+32(FP), CX
+	LEAQ -9(CX), DX
+	LEAQ -8(CX), BX
+	MOVL BX, 8(SP)
+	SHRQ $0x05, CX
+	SUBL CX, DX
+	LEAQ (AX)(DX*1), DX
+	MOVQ DX, (SP)
+	MOVL $0x00000001, CX
+	MOVL CX, 16(SP)
+	MOVQ src_base+24(FP), DX
+
+search_loop_encodeBlockAsm4MB:
+	MOVL  CX, BX
+	SUBL  12(SP), BX
+	SHRL  $0x06, BX
+	LEAL  4(CX)(BX*1), BX
+	CMPL  BX, 8(SP)
+	JAE   emit_remainder_encodeBlockAsm4MB
+	MOVQ  (DX)(CX*1), SI
+	MOVL  BX, 20(SP)
+	MOVQ  $0x0000cf1bbcdcbf9b, R8
+	MOVQ  SI, R9
+	MOVQ  SI, R10
+	SHRQ  $0x08, R10
+	SHLQ  $0x10, R9
+	IMULQ R8, R9
+	SHRQ  $0x32, R9
+	SHLQ  $0x10, R10
+	IMULQ R8, R10
+	SHRQ  $0x32, R10
+	MOVL  24(SP)(R9*4), BX
+	MOVL  24(SP)(R10*4), DI
+	MOVL  CX, 24(SP)(R9*4)
+	LEAL  1(CX), R9
+	MOVL  R9, 24(SP)(R10*4)
+	MOVQ  SI, R9
+	SHRQ  $0x10, R9
+	SHLQ  $0x10, R9
+	IMULQ R8, R9
+	SHRQ  $0x32, R9
+	MOVL  CX, R8
+	SUBL  16(SP), R8
+	MOVL  1(DX)(R8*1), R10
+	MOVQ  SI, R8
+	SHRQ  $0x08, R8
+	CMPL  R8, R10
+	JNE   no_repeat_found_encodeBlockAsm4MB
+	LEAL  1(CX), SI
+	MOVL  12(SP), DI
+	MOVL  SI, BX
+	SUBL  16(SP), BX
+	JZ    repeat_extend_back_end_encodeBlockAsm4MB
+
+repeat_extend_back_loop_encodeBlockAsm4MB:
+	CMPL SI, DI
+	JBE  repeat_extend_back_end_encodeBlockAsm4MB
+	MOVB -1(DX)(BX*1), R8
+	MOVB -1(DX)(SI*1), R9
+	CMPB R8, R9
+	JNE  repeat_extend_back_end_encodeBlockAsm4MB
+	LEAL -1(SI), SI
+	DECL BX
+	JNZ  repeat_extend_back_loop_encodeBlockAsm4MB
+
+repeat_extend_back_end_encodeBlockAsm4MB:
+	MOVL 12(SP), BX
+	CMPL BX, SI
+	JEQ  emit_literal_done_repeat_emit_encodeBlockAsm4MB
+	MOVL SI, R8
+	MOVL SI, 12(SP)
+	LEAQ (DX)(BX*1), R9
+	SUBL BX, R8
+	LEAL -1(R8), BX
+	CMPL BX, $0x3c
+	JB   one_byte_repeat_emit_encodeBlockAsm4MB
+	CMPL BX, $0x00000100
+	JB   two_bytes_repeat_emit_encodeBlockAsm4MB
+	CMPL BX, $0x00010000
+	JB   three_bytes_repeat_emit_encodeBlockAsm4MB
+	MOVL BX, R10
+	SHRL $0x10, R10
+	MOVB $0xf8, (AX)
+	MOVW BX, 1(AX)
+	MOVB R10, 3(AX)
+	ADDQ $0x04, AX
+	JMP  memmove_long_repeat_emit_encodeBlockAsm4MB
+
+three_bytes_repeat_emit_encodeBlockAsm4MB:
+	MOVB $0xf4, (AX)
+	MOVW BX, 1(AX)
+	ADDQ $0x03, AX
+	JMP  memmove_long_repeat_emit_encodeBlockAsm4MB
+
+two_bytes_repeat_emit_encodeBlockAsm4MB:
+	MOVB $0xf0, (AX)
+	MOVB BL, 1(AX)
+	ADDQ $0x02, AX
+	CMPL BX, $0x40
+	JB   memmove_repeat_emit_encodeBlockAsm4MB
+	JMP  memmove_long_repeat_emit_encodeBlockAsm4MB
+
+one_byte_repeat_emit_encodeBlockAsm4MB:
+	SHLB $0x02, BL
+	MOVB BL, (AX)
+	ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeBlockAsm4MB:
+	LEAQ (AX)(R8*1), BX
+
+	// genMemMoveShort
+	CMPQ R8, $0x08
+	JBE  emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8
+	CMPQ R8, $0x10
+	JBE  emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16
+	CMPQ R8, $0x20
+	JBE  emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32
+	JMP  emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8:
+	MOVQ (R9), R10
+	MOVQ R10, (AX)
+	JMP  memmove_end_copy_repeat_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_8through16:
+	MOVQ (R9), R10
+	MOVQ -8(R9)(R8*1), R9
+	MOVQ R10, (AX)
+	MOVQ R9, -8(AX)(R8*1)
+	JMP  memmove_end_copy_repeat_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_17through32:
+	MOVOU (R9), X0
+	MOVOU -16(R9)(R8*1), X1
+	MOVOU X0, (AX)
+	MOVOU X1, -16(AX)(R8*1)
+	JMP   memmove_end_copy_repeat_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm4MB_memmove_move_33through64:
+	MOVOU (R9), X0
+	MOVOU 16(R9), X1
+	MOVOU -32(R9)(R8*1), X2
+	MOVOU -16(R9)(R8*1), X3
+	MOVOU X0, (AX)
+	MOVOU X1, 16(AX)
+	MOVOU X2, -32(AX)(R8*1)
+	MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_repeat_emit_encodeBlockAsm4MB:
+	MOVQ BX, AX
+	JMP  emit_literal_done_repeat_emit_encodeBlockAsm4MB
+
+memmove_long_repeat_emit_encodeBlockAsm4MB:
+	LEAQ (AX)(R8*1), BX
+
+	// genMemMoveLong
+	MOVOU (R9), X0
+	MOVOU 16(R9), X1
+	MOVOU -32(R9)(R8*1), X2
+	MOVOU -16(R9)(R8*1), X3
+	MOVQ  R8, R11
+	SHRQ  $0x05, R11
+	MOVQ  AX, R10
+	ANDL  $0x0000001f, R10
+	MOVQ  $0x00000040, R12
+	SUBQ  R10, R12
+	DECQ  R11
+	JA    emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
+	LEAQ  -32(R9)(R12*1), R10
+	LEAQ  -32(AX)(R12*1), R13
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back:
+	MOVOU (R10), X4
+	MOVOU 16(R10), X5
+	MOVOA X4, (R13)
+	MOVOA X5, 16(R13)
+	ADDQ  $0x20, R13
+	ADDQ  $0x20, R10
+	ADDQ  $0x20, R12
+	DECQ  R11
+	JNA   emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32:
+	MOVOU -32(R9)(R12*1), X4
+	MOVOU -16(R9)(R12*1), X5
+	MOVOA X4, -32(AX)(R12*1)
+	MOVOA X5, -16(AX)(R12*1)
+	ADDQ  $0x20, R12
+	CMPQ  R8, R12
+	JAE   emit_lit_memmove_long_repeat_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
+	MOVOU X0, (AX)
+	MOVOU X1, 16(AX)
+	MOVOU X2, -32(AX)(R8*1)
+	MOVOU X3, -16(AX)(R8*1)
+	MOVQ  BX, AX
+
+emit_literal_done_repeat_emit_encodeBlockAsm4MB:
+	ADDL $0x05, CX
+	MOVL CX, BX
+	SUBL 16(SP), BX
+	MOVQ src_len+32(FP), R8
+	SUBL CX, R8
+	LEAQ (DX)(CX*1), R9
+	LEAQ (DX)(BX*1), BX
+
+	// matchLen
+	XORL R11, R11
+	CMPL R8, $0x08
+	JB   matchlen_match4_repeat_extend_encodeBlockAsm4MB
+
+matchlen_loopback_repeat_extend_encodeBlockAsm4MB:
+	MOVQ  (R9)(R11*1), R10
+	XORQ  (BX)(R11*1), R10
+	TESTQ R10, R10
+	JZ    matchlen_loop_repeat_extend_encodeBlockAsm4MB
+
+#ifdef GOAMD64_v3
+	TZCNTQ R10, R10
+
+#else
+	BSFQ R10, R10
+
+#endif
+	SARQ $0x03, R10
+	LEAL (R11)(R10*1), R11
+	JMP  repeat_extend_forward_end_encodeBlockAsm4MB
+
+matchlen_loop_repeat_extend_encodeBlockAsm4MB:
+	LEAL -8(R8), R8
+	LEAL 8(R11), R11
+	CMPL R8, $0x08
+	JAE  matchlen_loopback_repeat_extend_encodeBlockAsm4MB
+
+matchlen_match4_repeat_extend_encodeBlockAsm4MB:
+	CMPL R8, $0x04
+	JB   matchlen_match2_repeat_extend_encodeBlockAsm4MB
+	MOVL (R9)(R11*1), R10
+	CMPL (BX)(R11*1), R10
+	JNE  matchlen_match2_repeat_extend_encodeBlockAsm4MB
+	LEAL -4(R8), R8
+	LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeBlockAsm4MB:
+	CMPL R8, $0x01
+	JE   matchlen_match1_repeat_extend_encodeBlockAsm4MB
+	JB   repeat_extend_forward_end_encodeBlockAsm4MB
+	MOVW (R9)(R11*1), R10
+	CMPW (BX)(R11*1), R10
+	JNE  matchlen_match1_repeat_extend_encodeBlockAsm4MB
+	LEAL 2(R11), R11
+	SUBL $0x02, R8
+	JZ   repeat_extend_forward_end_encodeBlockAsm4MB
+
+matchlen_match1_repeat_extend_encodeBlockAsm4MB:
+	MOVB (R9)(R11*1), R10
+	CMPB (BX)(R11*1), R10
+	JNE  repeat_extend_forward_end_encodeBlockAsm4MB
+	LEAL 1(R11), R11
+
+repeat_extend_forward_end_encodeBlockAsm4MB:
+	ADDL  R11, CX
+	MOVL  CX, BX
+	SUBL  SI, BX
+	MOVL  16(SP), SI
+	TESTL DI, DI
+	JZ    repeat_as_copy_encodeBlockAsm4MB
+
+	// emitRepeat
+	MOVL BX, DI
+	LEAL -4(BX), BX
+	CMPL DI, $0x08
+	JBE  repeat_two_match_repeat_encodeBlockAsm4MB
+	CMPL DI, $0x0c
+	JAE  cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB
+	CMPL SI, $0x00000800
+	JB   repeat_two_offset_match_repeat_encodeBlockAsm4MB
+
+cant_repeat_two_offset_match_repeat_encodeBlockAsm4MB:
+	CMPL BX, $0x00000104
+	JB   repeat_three_match_repeat_encodeBlockAsm4MB
+	CMPL BX, $0x00010100
+	JB   repeat_four_match_repeat_encodeBlockAsm4MB
+	LEAL -65536(BX), BX
+	MOVL BX, SI
+	MOVW $0x001d, (AX)
+	MOVW BX, 2(AX)
+	SARL $0x10, SI
+	MOVB SI, 4(AX)
+	ADDQ $0x05, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+repeat_four_match_repeat_encodeBlockAsm4MB:
+	LEAL -256(BX), BX
+	MOVW $0x0019, (AX)
+	MOVW BX, 2(AX)
+	ADDQ $0x04, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+repeat_three_match_repeat_encodeBlockAsm4MB:
+	LEAL -4(BX), BX
+	MOVW $0x0015, (AX)
+	MOVB BL, 2(AX)
+	ADDQ $0x03, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_match_repeat_encodeBlockAsm4MB:
+	SHLL $0x02, BX
+	ORL  $0x01, BX
+	MOVW BX, (AX)
+	ADDQ $0x02, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_offset_match_repeat_encodeBlockAsm4MB:
+	XORQ DI, DI
+	LEAL 1(DI)(BX*4), BX
+	MOVB SI, 1(AX)
+	SARL $0x08, SI
+	SHLL $0x05, SI
+	ORL  SI, BX
+	MOVB BL, (AX)
+	ADDQ $0x02, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+repeat_as_copy_encodeBlockAsm4MB:
+	// emitCopy
+	CMPL SI, $0x00010000
+	JB   two_byte_offset_repeat_as_copy_encodeBlockAsm4MB
+	CMPL BX, $0x40
+	JBE  four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB
+	MOVB $0xff, (AX)
+	MOVL SI, 1(AX)
+	LEAL -64(BX), BX
+	ADDQ $0x05, AX
+	CMPL BX, $0x04
+	JB   four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB
+
+	// emitRepeat
+	MOVL BX, DI
+	LEAL -4(BX), BX
+	CMPL DI, $0x08
+	JBE  repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy
+	CMPL DI, $0x0c
+	JAE  cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy
+	CMPL SI, $0x00000800
+	JB   repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
+	CMPL BX, $0x00000104
+	JB   repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy
+	CMPL BX, $0x00010100
+	JB   repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy
+	LEAL -65536(BX), BX
+	MOVL BX, SI
+	MOVW $0x001d, (AX)
+	MOVW BX, 2(AX)
+	SARL $0x10, SI
+	MOVB SI, 4(AX)
+	ADDQ $0x05, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
+	LEAL -256(BX), BX
+	MOVW $0x0019, (AX)
+	MOVW BX, 2(AX)
+	ADDQ $0x04, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
+	LEAL -4(BX), BX
+	MOVW $0x0015, (AX)
+	MOVB BL, 2(AX)
+	ADDQ $0x03, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
+	SHLL $0x02, BX
+	ORL  $0x01, BX
+	MOVW BX, (AX)
+	ADDQ $0x02, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy:
+	XORQ DI, DI
+	LEAL 1(DI)(BX*4), BX
+	MOVB SI, 1(AX)
+	SARL $0x08, SI
+	SHLL $0x05, SI
+	ORL  SI, BX
+	MOVB BL, (AX)
+	ADDQ $0x02, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+four_bytes_remain_repeat_as_copy_encodeBlockAsm4MB:
+	TESTL BX, BX
+	JZ    repeat_end_emit_encodeBlockAsm4MB
+	XORL  DI, DI
+	LEAL  -1(DI)(BX*4), BX
+	MOVB  BL, (AX)
+	MOVL  SI, 1(AX)
+	ADDQ  $0x05, AX
+	JMP   repeat_end_emit_encodeBlockAsm4MB
+
+two_byte_offset_repeat_as_copy_encodeBlockAsm4MB:
+	CMPL BX, $0x40
+	JBE  two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB
+	CMPL SI, $0x00000800
+	JAE  long_offset_short_repeat_as_copy_encodeBlockAsm4MB
+	MOVL $0x00000001, DI
+	LEAL 16(DI), DI
+	MOVB SI, 1(AX)
+	SHRL $0x08, SI
+	SHLL $0x05, SI
+	ORL  SI, DI
+	MOVB DI, (AX)
+	ADDQ $0x02, AX
+	SUBL $0x08, BX
+
+	// emitRepeat
+	LEAL -4(BX), BX
+	JMP  cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+	MOVL BX, DI
+	LEAL -4(BX), BX
+	CMPL DI, $0x08
+	JBE  repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+	CMPL DI, $0x0c
+	JAE  cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+	CMPL SI, $0x00000800
+	JB   repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+	CMPL BX, $0x00000104
+	JB   repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+	CMPL BX, $0x00010100
+	JB   repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b
+	LEAL -65536(BX), BX
+	MOVL BX, SI
+	MOVW $0x001d, (AX)
+	MOVW BX, 2(AX)
+	SARL $0x10, SI
+	MOVB SI, 4(AX)
+	ADDQ $0x05, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+	LEAL -256(BX), BX
+	MOVW $0x0019, (AX)
+	MOVW BX, 2(AX)
+	ADDQ $0x04, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+	LEAL -4(BX), BX
+	MOVW $0x0015, (AX)
+	MOVB BL, 2(AX)
+	ADDQ $0x03, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+	SHLL $0x02, BX
+	ORL  $0x01, BX
+	MOVW BX, (AX)
+	ADDQ $0x02, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short_2b:
+	XORQ DI, DI
+	LEAL 1(DI)(BX*4), BX
+	MOVB SI, 1(AX)
+	SARL $0x08, SI
+	SHLL $0x05, SI
+	ORL  SI, BX
+	MOVB BL, (AX)
+	ADDQ $0x02, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+long_offset_short_repeat_as_copy_encodeBlockAsm4MB:
+	MOVB $0xee, (AX)
+	MOVW SI, 1(AX)
+	LEAL -60(BX), BX
+	ADDQ $0x03, AX
+
+	// emitRepeat
+	MOVL BX, DI
+	LEAL -4(BX), BX
+	CMPL DI, $0x08
+	JBE  repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
+	CMPL DI, $0x0c
+	JAE  cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
+	CMPL SI, $0x00000800
+	JB   repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
+
+cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
+	CMPL BX, $0x00000104
+	JB   repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
+	CMPL BX, $0x00010100
+	JB   repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short
+	LEAL -65536(BX), BX
+	MOVL BX, SI
+	MOVW $0x001d, (AX)
+	MOVW BX, 2(AX)
+	SARL $0x10, SI
+	MOVB SI, 4(AX)
+	ADDQ $0x05, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+repeat_four_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
+	LEAL -256(BX), BX
+	MOVW $0x0019, (AX)
+	MOVW BX, 2(AX)
+	ADDQ $0x04, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+repeat_three_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
+	LEAL -4(BX), BX
+	MOVW $0x0015, (AX)
+	MOVB BL, 2(AX)
+	ADDQ $0x03, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
+	SHLL $0x02, BX
+	ORL  $0x01, BX
+	MOVW BX, (AX)
+	ADDQ $0x02, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+repeat_two_offset_repeat_as_copy_encodeBlockAsm4MB_emit_copy_short:
+	XORQ DI, DI
+	LEAL 1(DI)(BX*4), BX
+	MOVB SI, 1(AX)
+	SARL $0x08, SI
+	SHLL $0x05, SI
+	ORL  SI, BX
+	MOVB BL, (AX)
+	ADDQ $0x02, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+two_byte_offset_short_repeat_as_copy_encodeBlockAsm4MB:
+	MOVL BX, DI
+	SHLL $0x02, DI
+	CMPL BX, $0x0c
+	JAE  emit_copy_three_repeat_as_copy_encodeBlockAsm4MB
+	CMPL SI, $0x00000800
+	JAE  emit_copy_three_repeat_as_copy_encodeBlockAsm4MB
+	LEAL -15(DI), DI
+	MOVB SI, 1(AX)
+	SHRL $0x08, SI
+	SHLL $0x05, SI
+	ORL  SI, DI
+	MOVB DI, (AX)
+	ADDQ $0x02, AX
+	JMP  repeat_end_emit_encodeBlockAsm4MB
+
+emit_copy_three_repeat_as_copy_encodeBlockAsm4MB:
+	LEAL -2(DI), DI
+	MOVB DI, (AX)
+	MOVW SI, 1(AX)
+	ADDQ $0x03, AX
+
+repeat_end_emit_encodeBlockAsm4MB:
+	MOVL CX, 12(SP)
+	JMP  search_loop_encodeBlockAsm4MB
+
+no_repeat_found_encodeBlockAsm4MB:
+	CMPL (DX)(BX*1), SI
+	JEQ  candidate_match_encodeBlockAsm4MB
+	SHRQ $0x08, SI
+	MOVL 24(SP)(R9*4), BX
+	LEAL 2(CX), R8
+	CMPL (DX)(DI*1), SI
+	JEQ  candidate2_match_encodeBlockAsm4MB
+	MOVL R8, 24(SP)(R9*4)
+	SHRQ $0x08, SI
+	CMPL (DX)(BX*1), SI
+	JEQ  candidate3_match_encodeBlockAsm4MB
+	MOVL 20(SP), CX
+	JMP  search_loop_encodeBlockAsm4MB
+
+candidate3_match_encodeBlockAsm4MB:
+	ADDL $0x02, CX
+	JMP  candidate_match_encodeBlockAsm4MB
+
+candidate2_match_encodeBlockAsm4MB:
+	MOVL R8, 24(SP)(R9*4)
+	INCL CX
+	MOVL DI, BX
+
+candidate_match_encodeBlockAsm4MB:
+	MOVL  12(SP), SI
+	TESTL BX, BX
+	JZ    match_extend_back_end_encodeBlockAsm4MB
+
+match_extend_back_loop_encodeBlockAsm4MB:
+	CMPL CX, SI
+	JBE  match_extend_back_end_encodeBlockAsm4MB
+	MOVB -1(DX)(BX*1), DI
+	MOVB -1(DX)(CX*1), R8
+	CMPB DI, R8
+	JNE  match_extend_back_end_encodeBlockAsm4MB
+	LEAL -1(CX), CX
+	DECL BX
+	JZ   match_extend_back_end_encodeBlockAsm4MB
+	JMP  match_extend_back_loop_encodeBlockAsm4MB
+
+match_extend_back_end_encodeBlockAsm4MB:
+	MOVL CX, SI
+	SUBL 12(SP), SI
+	LEAQ 4(AX)(SI*1), SI
+	CMPQ SI, (SP)
+	JB   match_dst_size_check_encodeBlockAsm4MB
+	MOVQ $0x00000000, ret+48(FP)
+	RET
+
+match_dst_size_check_encodeBlockAsm4MB:
+	MOVL CX, SI
+	MOVL 12(SP), DI
+	CMPL DI, SI
+	JEQ  emit_literal_done_match_emit_encodeBlockAsm4MB
+	MOVL SI, R8
+	MOVL SI, 12(SP)
+	LEAQ (DX)(DI*1), SI
+	SUBL DI, R8
+	LEAL -1(R8), DI
+	CMPL DI, $0x3c
+	JB   one_byte_match_emit_encodeBlockAsm4MB
+	CMPL DI, $0x00000100
+	JB   two_bytes_match_emit_encodeBlockAsm4MB
+	CMPL DI, $0x00010000
+	JB   three_bytes_match_emit_encodeBlockAsm4MB
+	MOVL DI, R9
+	SHRL $0x10, R9
+	MOVB $0xf8, (AX)
+	MOVW DI, 1(AX)
+	MOVB R9, 3(AX)
+	ADDQ $0x04, AX
+	JMP  memmove_long_match_emit_encodeBlockAsm4MB
+
+three_bytes_match_emit_encodeBlockAsm4MB:
+	MOVB $0xf4, (AX)
+	MOVW DI, 1(AX)
+	ADDQ $0x03, AX
+	JMP  memmove_long_match_emit_encodeBlockAsm4MB
+
+two_bytes_match_emit_encodeBlockAsm4MB:
+	MOVB $0xf0, (AX)
+	MOVB DI, 1(AX)
+	ADDQ $0x02, AX
+	CMPL DI, $0x40
+	JB   memmove_match_emit_encodeBlockAsm4MB
+	JMP  memmove_long_match_emit_encodeBlockAsm4MB
+
+one_byte_match_emit_encodeBlockAsm4MB:
+	SHLB $0x02, DI
+	MOVB DI, (AX)
+	ADDQ $0x01, AX
+
+memmove_match_emit_encodeBlockAsm4MB:
+	LEAQ (AX)(R8*1), DI
+
+	// genMemMoveShort
+	CMPQ R8, $0x08
+	JBE  emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8
+	CMPQ R8, $0x10
+	JBE  emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16
+	CMPQ R8, $0x20
+	JBE  emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32
+	JMP  emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8:
+	MOVQ (SI), R9
+	MOVQ R9, (AX)
+	JMP  memmove_end_copy_match_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_8through16:
+	MOVQ (SI), R9
+	MOVQ -8(SI)(R8*1), SI
+	MOVQ R9, (AX)
+	MOVQ SI, -8(AX)(R8*1)
+	JMP  memmove_end_copy_match_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_17through32:
+	MOVOU (SI), X0
+	MOVOU -16(SI)(R8*1), X1
+	MOVOU X0, (AX)
+	MOVOU X1, -16(AX)(R8*1)
+	JMP   memmove_end_copy_match_emit_encodeBlockAsm4MB
+
+emit_lit_memmove_match_emit_encodeBlockAsm4MB_memmove_move_33through64:
+	MOVOU (SI), X0
+	MOVOU 16(SI), X1
+	MOVOU -32(SI)(R8*1), X2
+	MOVOU -16(SI)(R8*1), X3
+	MOVOU X0, (AX)
+	MOVOU X1, 16(AX)
+	MOVOU X2, -32(AX)(R8*1)
+	MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_match_emit_encodeBlockAsm4MB:
+	MOVQ DI, AX
+	JMP  emit_literal_done_match_emit_encodeBlockAsm4MB
+
+memmove_long_match_emit_encodeBlockAsm4MB:
+	LEAQ (AX)(R8*1), DI
+
+	// genMemMoveLong
+	MOVOU (SI), X0
+	MOVOU 16(SI), X1
+	MOVOU -32(SI)(R8*1), X2
+	MOVOU -16(SI)(R8*1), X3
+	MOVQ  R8, R10
+	SHRQ  $0x05, R10
+	MOVQ  AX, R9
+	ANDL  $0x0000001f, R9
+	MOVQ  $0x00000040, R11
+	SUBQ  R9, R11
+	DECQ  R10
+	JA    emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
+	LEAQ  -32(SI)(R11*1), R9
+	LEAQ  -32(AX)(R11*1), R12
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back:
+	MOVOU (R9), X4
+	MOVOU 16(R9), X5
+	MOVOA X4, (R12)
+	MOVOA X5, 16(R12)
+	ADDQ  $0x20, R12
+	ADDQ  $0x20, R9
+	ADDQ  $0x20, R11
+	DECQ  R10
+	JNA   emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32:
+	MOVOU -32(SI)(R11*1), X4
+	MOVOU -16(SI)(R11*1), X5
+	MOVOA X4, -32(AX)(R11*1)
+	MOVOA X5, -16(AX)(R11*1)
+	ADDQ  $0x20, R11
+	CMPQ  R8, R11
+	JAE   emit_lit_memmove_long_match_emit_encodeBlockAsm4MBlarge_forward_sse_loop_32
+	MOVOU X0, (AX)
+	MOVOU X1, 16(AX)
+	MOVOU X2, -32(AX)(R8*1)
+	MOVOU X3, -16(AX)(R8*1)
+	MOVQ  DI, AX
+
+emit_literal_done_match_emit_encodeBlockAsm4MB:
+match_nolit_loop_encodeBlockAsm4MB:
+	MOVL CX, SI
+	SUBL BX, SI
+	MOVL SI, 16(SP)
+	ADDL $0x04, CX
+	ADDL $0x04, BX
+	MOVQ src_len+32(FP), SI
+	SUBL CX, SI
+	LEAQ (DX)(CX*1), DI
+	LEAQ (DX)(BX*1), BX
+
+	// matchLen
+	XORL R9, R9
+	CMPL SI, $0x08
+	JB   matchlen_match4_match_nolit_encodeBlockAsm4MB
+
+matchlen_loopback_match_nolit_encodeBlockAsm4MB:
+	MOVQ  (DI)(R9*1), R8
+	XORQ  (BX)(R9*1), R8
+	TESTQ R8, R8
+	JZ    matchlen_loop_match_nolit_encodeBlockAsm4MB
+
+#ifdef GOAMD64_v3
+	TZCNTQ R8, R8
+
+#else
+	BSFQ R8, R8
+
+#endif
+	SARQ $0x03, R8
+	LEAL (R9)(R8*1), R9
+	JMP  match_nolit_end_encodeBlockAsm4MB
+
+matchlen_loop_match_nolit_encodeBlockAsm4MB:
+	LEAL -8(SI), SI
+	LEAL 8(R9), R9
+	CMPL SI, $0x08
+	JAE  matchlen_loopback_match_nolit_encodeBlockAsm4MB
+
+matchlen_match4_match_nolit_encodeBlockAsm4MB:
+	CMPL SI, $0x04
+	JB   matchlen_match2_match_nolit_encodeBlockAsm4MB
+	MOVL (DI)(R9*1), R8
+	CMPL (BX)(R9*1), R8
+	JNE  matchlen_match2_match_nolit_encodeBlockAsm4MB
+	LEAL -4(SI), SI
+	LEAL 4(R9), R9
+
+matchlen_match2_match_nolit_encodeBlockAsm4MB:
+	CMPL SI, $0x01
+	JE   matchlen_match1_match_nolit_encodeBlockAsm4MB
+	JB   match_nolit_end_encodeBlockAsm4MB
+	MOVW (DI)(R9*1), R8
+	CMPW (BX)(R9*1), R8
+	JNE  matchlen_match1_match_nolit_encodeBlockAsm4MB
+	LEAL 2(R9), R9
+	SUBL $0x02, SI
+	JZ   match_nolit_end_encodeBlockAsm4MB
+
+matchlen_match1_match_nolit_encodeBlockAsm4MB:
+	MOVB (DI)(R9*1), R8
+	CMPB (BX)(R9*1), R8
+	JNE  match_nolit_end_encodeBlockAsm4MB
+	LEAL 1(R9), R9
+
+match_nolit_end_encodeBlockAsm4MB:
+	ADDL R9, CX
+	MOVL 16(SP), BX
+	ADDL $0x04, R9
+	MOVL CX, 12(SP)
+
+	// emitCopy
+	CMPL BX, $0x00010000
+	JB   two_byte_offset_match_nolit_encodeBlockAsm4MB
+	CMPL R9, $0x40
+	JBE  four_bytes_remain_match_nolit_encodeBlockAsm4MB
+	MOVB $0xff, (AX)
+	MOVL BX, 1(AX)
+	LEAL -64(R9), R9
+	ADDQ $0x05, AX
+	CMPL R9, $0x04
+	JB   four_bytes_remain_match_nolit_encodeBlockAsm4MB
+
+	// emitRepeat
+	MOVL R9, SI
+	LEAL -4(R9), R9
+	CMPL SI, $0x08
+	JBE  repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy
+	CMPL SI, $0x0c
+	JAE  cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy
+	CMPL BX, $0x00000800
+	JB   repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy:
+	CMPL R9, $0x00000104
+	JB   repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy
+	CMPL R9, $0x00010100
+	JB   repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy
+	LEAL -65536(R9), R9
+	MOVL R9, BX
+	MOVW $0x001d, (AX)
+	MOVW R9, 2(AX)
+	SARL $0x10, BX
+	MOVB BL, 4(AX)
+	ADDQ $0x05, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy:
+	LEAL -256(R9), R9
+	MOVW $0x0019, (AX)
+	MOVW R9, 2(AX)
+	ADDQ $0x04, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy:
+	LEAL -4(R9), R9
+	MOVW $0x0015, (AX)
+	MOVB R9, 2(AX)
+	ADDQ $0x03, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy:
+	SHLL $0x02, R9
+	ORL  $0x01, R9
+	MOVW R9, (AX)
+	ADDQ $0x02, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy:
+	XORQ SI, SI
+	LEAL 1(SI)(R9*4), R9
+	MOVB BL, 1(AX)
+	SARL $0x08, BX
+	SHLL $0x05, BX
+	ORL  BX, R9
+	MOVB R9, (AX)
+	ADDQ $0x02, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+four_bytes_remain_match_nolit_encodeBlockAsm4MB:
+	TESTL R9, R9
+	JZ    match_nolit_emitcopy_end_encodeBlockAsm4MB
+	XORL  SI, SI
+	LEAL  -1(SI)(R9*4), R9
+	MOVB  R9, (AX)
+	MOVL  BX, 1(AX)
+	ADDQ  $0x05, AX
+	JMP   match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+two_byte_offset_match_nolit_encodeBlockAsm4MB:
+	CMPL R9, $0x40
+	JBE  two_byte_offset_short_match_nolit_encodeBlockAsm4MB
+	CMPL BX, $0x00000800
+	JAE  long_offset_short_match_nolit_encodeBlockAsm4MB
+	MOVL $0x00000001, SI
+	LEAL 16(SI), SI
+	MOVB BL, 1(AX)
+	SHRL $0x08, BX
+	SHLL $0x05, BX
+	ORL  BX, SI
+	MOVB SI, (AX)
+	ADDQ $0x02, AX
+	SUBL $0x08, R9
+
+	// emitRepeat
+	LEAL -4(R9), R9
+	JMP  cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+	MOVL R9, SI
+	LEAL -4(R9), R9
+	CMPL SI, $0x08
+	JBE  repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+	CMPL SI, $0x0c
+	JAE  cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+	CMPL BX, $0x00000800
+	JB   repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+	CMPL R9, $0x00000104
+	JB   repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+	CMPL R9, $0x00010100
+	JB   repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b
+	LEAL -65536(R9), R9
+	MOVL R9, BX
+	MOVW $0x001d, (AX)
+	MOVW R9, 2(AX)
+	SARL $0x10, BX
+	MOVB BL, 4(AX)
+	ADDQ $0x05, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+	LEAL -256(R9), R9
+	MOVW $0x0019, (AX)
+	MOVW R9, 2(AX)
+	ADDQ $0x04, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+	LEAL -4(R9), R9
+	MOVW $0x0015, (AX)
+	MOVB R9, 2(AX)
+	ADDQ $0x03, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+	SHLL $0x02, R9
+	ORL  $0x01, R9
+	MOVW R9, (AX)
+	ADDQ $0x02, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short_2b:
+	XORQ SI, SI
+	LEAL 1(SI)(R9*4), R9
+	MOVB BL, 1(AX)
+	SARL $0x08, BX
+	SHLL $0x05, BX
+	ORL  BX, R9
+	MOVB R9, (AX)
+	ADDQ $0x02, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+long_offset_short_match_nolit_encodeBlockAsm4MB:
+	MOVB $0xee, (AX)
+	MOVW BX, 1(AX)
+	LEAL -60(R9), R9
+	ADDQ $0x03, AX
+
+	// emitRepeat
+	MOVL R9, SI
+	LEAL -4(R9), R9
+	CMPL SI, $0x08
+	JBE  repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short
+	CMPL SI, $0x0c
+	JAE  cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short
+	CMPL BX, $0x00000800
+	JB   repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short
+
+cant_repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short:
+	CMPL R9, $0x00000104
+	JB   repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short
+	CMPL R9, $0x00010100
+	JB   repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short
+	LEAL -65536(R9), R9
+	MOVL R9, BX
+	MOVW $0x001d, (AX)
+	MOVW R9, 2(AX)
+	SARL $0x10, BX
+	MOVB BL, 4(AX)
+	ADDQ $0x05, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_four_match_nolit_encodeBlockAsm4MB_emit_copy_short:
+	LEAL -256(R9), R9
+	MOVW $0x0019, (AX)
+	MOVW R9, 2(AX)
+	ADDQ $0x04, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_three_match_nolit_encodeBlockAsm4MB_emit_copy_short:
+	LEAL -4(R9), R9
+	MOVW $0x0015, (AX)
+	MOVB R9, 2(AX)
+	ADDQ $0x03, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_match_nolit_encodeBlockAsm4MB_emit_copy_short:
+	SHLL $0x02, R9
+	ORL  $0x01, R9
+	MOVW R9, (AX)
+	ADDQ $0x02, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+repeat_two_offset_match_nolit_encodeBlockAsm4MB_emit_copy_short:
+	XORQ SI, SI
+	LEAL 1(SI)(R9*4), R9
+	MOVB BL, 1(AX)
+	SARL $0x08, BX
+	SHLL $0x05, BX
+	ORL  BX, R9
+	MOVB R9, (AX)
+	ADDQ $0x02, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+two_byte_offset_short_match_nolit_encodeBlockAsm4MB:
+	MOVL R9, SI
+	SHLL $0x02, SI
+	CMPL R9, $0x0c
+	JAE  emit_copy_three_match_nolit_encodeBlockAsm4MB
+	CMPL BX, $0x00000800
+	JAE  emit_copy_three_match_nolit_encodeBlockAsm4MB
+	LEAL -15(SI), SI
+	MOVB BL, 1(AX)
+	SHRL $0x08, BX
+	SHLL $0x05, BX
+	ORL  BX, SI
+	MOVB SI, (AX)
+	ADDQ $0x02, AX
+	JMP  match_nolit_emitcopy_end_encodeBlockAsm4MB
+
+emit_copy_three_match_nolit_encodeBlockAsm4MB:
+	LEAL -2(SI), SI
+	MOVB SI, (AX)
+	MOVW BX, 1(AX)
+	ADDQ $0x03, AX
+
+match_nolit_emitcopy_end_encodeBlockAsm4MB:
+	CMPL CX, 8(SP)
+	JAE  emit_remainder_encodeBlockAsm4MB
+	MOVQ -2(DX)(CX*1), SI
+	CMPQ AX, (SP)
+	JB   match_nolit_dst_ok_encodeBlockAsm4MB
+	MOVQ $0x00000000, ret+48(FP)
+	RET
+
+match_nolit_dst_ok_encodeBlockAsm4MB:
+	MOVQ  $0x0000cf1bbcdcbf9b, R8
+	MOVQ  SI, DI
+	SHRQ  $0x10, SI
+	MOVQ  SI, BX
+	SHLQ  $0x10, DI
+	IMULQ R8, DI
+	SHRQ  $0x32, DI
+	SHLQ  $0x10, BX
+	IMULQ R8, BX
+	SHRQ  $0x32, BX
+	LEAL  -2(CX), R8
+	LEAQ  24(SP)(BX*4), R9
+	MOVL  (R9), BX
+	MOVL  R8, 24(SP)(DI*4)
+	MOVL  CX, (R9)
+	CMPL  (DX)(BX*1), SI
+	JEQ   match_nolit_loop_encodeBlockAsm4MB
+	INCL  CX
+	JMP   search_loop_encodeBlockAsm4MB
+
+emit_remainder_encodeBlockAsm4MB:
+	MOVQ src_len+32(FP), CX
+	SUBL 12(SP), CX
+	LEAQ 4(AX)(CX*1), CX
+	CMPQ CX, (SP)
+	JB   emit_remainder_ok_encodeBlockAsm4MB
+	MOVQ $0x00000000, ret+48(FP)
+	RET
+
+emit_remainder_ok_encodeBlockAsm4MB:
+	MOVQ src_len+32(FP), CX
+	MOVL 12(SP), BX
+	CMPL BX, CX
+	JEQ  emit_literal_done_emit_remainder_encodeBlockAsm4MB
+	MOVL CX, SI
+	MOVL CX, 12(SP)
+	LEAQ (DX)(BX*1), CX
+	SUBL BX, SI
+	LEAL -1(SI), DX
+	CMPL DX, $0x3c
+	JB   one_byte_emit_remainder_encodeBlockAsm4MB
+	CMPL DX, $0x00000100
+	JB   two_bytes_emit_remainder_encodeBlockAsm4MB
+	CMPL DX, $0x00010000
+	JB   three_bytes_emit_remainder_encodeBlockAsm4MB
+	MOVL DX, BX
+	SHRL $0x10, BX
+	MOVB $0xf8, (AX)
+	MOVW DX, 1(AX)
+	MOVB BL, 3(AX)
+	ADDQ $0x04, AX
+	JMP  memmove_long_emit_remainder_encodeBlockAsm4MB
+
+three_bytes_emit_remainder_encodeBlockAsm4MB:
+	MOVB $0xf4, (AX)
+	MOVW DX, 1(AX)
+	ADDQ $0x03, AX
+	JMP  memmove_long_emit_remainder_encodeBlockAsm4MB
+
+two_bytes_emit_remainder_encodeBlockAsm4MB:
+	MOVB $0xf0, (AX)
+	MOVB DL, 1(AX)
+	ADDQ $0x02, AX
+	CMPL DX, $0x40
+	JB   memmove_emit_remainder_encodeBlockAsm4MB
+	JMP  memmove_long_emit_remainder_encodeBlockAsm4MB
+
+one_byte_emit_remainder_encodeBlockAsm4MB:
+	SHLB $0x02, DL
+	MOVB DL, (AX)
+	ADDQ $0x01, AX
+
+memmove_emit_remainder_encodeBlockAsm4MB:
+	LEAQ (AX)(SI*1), DX
+	MOVL SI, BX
+
+	// genMemMoveShort
+	CMPQ BX, $0x03
+	JB   emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2
+	JE   emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3
+	CMPQ BX, $0x08
+	JB   emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7
+	CMPQ BX, $0x10
+	JBE  emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16
+	CMPQ BX, $0x20
+	JBE  emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32
+	JMP  emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_1or2:
+	MOVB (CX), SI
+	MOVB -1(CX)(BX*1), CL
+	MOVB SI, (AX)
+	MOVB CL, -1(AX)(BX*1)
+	JMP  memmove_end_copy_emit_remainder_encodeBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_3:
+	MOVW (CX), SI
+	MOVB 2(CX), CL
+	MOVW SI, (AX)
+	MOVB CL, 2(AX)
+	JMP  memmove_end_copy_emit_remainder_encodeBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_4through7:
+	MOVL (CX), SI
+	MOVL -4(CX)(BX*1), CX
+	MOVL SI, (AX)
+	MOVL CX, -4(AX)(BX*1)
+	JMP  memmove_end_copy_emit_remainder_encodeBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_8through16:
+	MOVQ (CX), SI
+	MOVQ -8(CX)(BX*1), CX
+	MOVQ SI, (AX)
+	MOVQ CX, -8(AX)(BX*1)
+	JMP  memmove_end_copy_emit_remainder_encodeBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_17through32:
+	MOVOU (CX), X0
+	MOVOU -16(CX)(BX*1), X1
+	MOVOU X0, (AX)
+	MOVOU X1, -16(AX)(BX*1)
+	JMP   memmove_end_copy_emit_remainder_encodeBlockAsm4MB
+
+emit_lit_memmove_emit_remainder_encodeBlockAsm4MB_memmove_move_33through64:
+	MOVOU (CX), X0
+	MOVOU 16(CX), X1
+	MOVOU -32(CX)(BX*1), X2
+	MOVOU -16(CX)(BX*1), X3
+	MOVOU X0, (AX)
+	MOVOU X1, 16(AX)
+	MOVOU X2, -32(AX)(BX*1)
+	MOVOU X3, -16(AX)(BX*1)
+
+memmove_end_copy_emit_remainder_encodeBlockAsm4MB:
+	MOVQ DX, AX
+	JMP  emit_literal_done_emit_remainder_encodeBlockAsm4MB
+
+memmove_long_emit_remainder_encodeBlockAsm4MB:
+	LEAQ (AX)(SI*1), DX
+	MOVL SI, BX
+
+	// genMemMoveLong
+	MOVOU (CX), X0
+	MOVOU 16(CX), X1
+	MOVOU -32(CX)(BX*1), X2
+	MOVOU -16(CX)(BX*1), X3
+	MOVQ  BX, DI
+	SHRQ  $0x05, DI
+	MOVQ  AX, SI
+	ANDL  $0x0000001f, SI
+	MOVQ  $0x00000040, R8
+	SUBQ  SI, R8
+	DECQ  DI
+	JA    emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32
+	LEAQ  -32(CX)(R8*1), SI
+	LEAQ  -32(AX)(R8*1), R9
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back:
+	MOVOU (SI), X4
+	MOVOU 16(SI), X5
+	MOVOA X4, (R9)
+	MOVOA X5, 16(R9)
+	ADDQ  $0x20, R9
+	ADDQ  $0x20, SI
+	ADDQ  $0x20, R8
+	DECQ  DI
+	JNA   emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_big_loop_back
+
+emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32:
+	MOVOU -32(CX)(R8*1), X4
+	MOVOU -16(CX)(R8*1), X5
+	MOVOA X4, -32(AX)(R8*1)
+	MOVOA X5, -16(AX)(R8*1)
+	ADDQ  $0x20, R8
+	CMPQ  BX, R8
+	JAE   emit_lit_memmove_long_emit_remainder_encodeBlockAsm4MBlarge_forward_sse_loop_32
+	MOVOU X0, (AX)
+	MOVOU X1, 16(AX)
+	MOVOU X2, -32(AX)(BX*1)
+	MOVOU X3, -16(AX)(BX*1)
+	MOVQ  DX, AX
+
+emit_literal_done_emit_remainder_encodeBlockAsm4MB:
+	MOVQ dst_base+0(FP), CX
+	SUBQ CX, AX
+	MOVQ AX, ret+48(FP)
+	RET
+
+// func encodeBlockAsm12B(dst []byte, src []byte) int
+// Requires: BMI, SSE2
+TEXT ·encodeBlockAsm12B(SB), $16408-56
+	MOVQ dst_base+0(FP), AX
+	MOVQ $0x00000080, CX
+	LEAQ 24(SP), DX
+	PXOR X0, X0
+
+zero_loop_encodeBlockAsm12B:
+	MOVOU X0, (DX)
+	MOVOU X0, 16(DX)
+	MOVOU X0, 32(DX)
+	MOVOU X0, 48(DX)
+	MOVOU X0, 64(DX)
+	MOVOU X0, 80(DX)
+	MOVOU X0, 96(DX)
+	MOVOU X0, 112(DX)
+	ADDQ  $0x80, DX
+	DECQ  CX
+	JNZ   zero_loop_encodeBlockAsm12B
+	MOVL  $0x00000000, 12(SP)
+	MOVQ  src_len+32(FP), CX
+	LEAQ  -9(CX), DX
+	LEAQ  -8(CX), BX
+	MOVL  BX, 8(SP)
+	SHRQ  $0x05, CX
+	SUBL  CX, DX
+	LEAQ  (AX)(DX*1), DX
+	MOVQ  DX, (SP)
+	MOVL  $0x00000001, CX
+	MOVL  CX, 16(SP)
+	MOVQ  src_base+24(FP), DX
+
+search_loop_encodeBlockAsm12B:
+	MOVL  CX, BX
+	SUBL  12(SP), BX
+	SHRL  $0x05, BX
+	LEAL  4(CX)(BX*1), BX
+	CMPL  BX, 8(SP)
+	JAE   emit_remainder_encodeBlockAsm12B
+	MOVQ  (DX)(CX*1), SI
+	MOVL  BX, 20(SP)
+	MOVQ  $0x000000cf1bbcdcbb, R8
+	MOVQ  SI, R9
+	MOVQ  SI, R10
+	SHRQ  $0x08, R10
+	SHLQ  $0x18, R9
+	IMULQ R8, R9
+	SHRQ  $0x34, R9
+	SHLQ  $0x18, R10
+	IMULQ R8, R10
+	SHRQ  $0x34, R10
+	MOVL  24(SP)(R9*4), BX
+	MOVL  24(SP)(R10*4), DI
+	MOVL  CX, 24(SP)(R9*4)
+	LEAL  1(CX), R9
+	MOVL  R9, 24(SP)(R10*4)
+	MOVQ  SI, R9
+	SHRQ  $0x10, R9
+	SHLQ  $0x18, R9
+	IMULQ R8, R9
+	SHRQ  $0x34, R9
+	MOVL  CX, R8
+	SUBL  16(SP), R8
+	MOVL  1(DX)(R8*1), R10
+	MOVQ  SI, R8
+	SHRQ  $0x08, R8
+	CMPL  R8, R10
+	JNE   no_repeat_found_encodeBlockAsm12B
+	LEAL  1(CX), SI
+	MOVL  12(SP), DI
+	MOVL  SI, BX
+	SUBL  16(SP), BX
+	JZ    repeat_extend_back_end_encodeBlockAsm12B
+
+repeat_extend_back_loop_encodeBlockAsm12B:
+	CMPL SI, DI
+	JBE  repeat_extend_back_end_encodeBlockAsm12B
+	MOVB -1(DX)(BX*1), R8
+	MOVB -1(DX)(SI*1), R9
+	CMPB R8, R9
+	JNE  repeat_extend_back_end_encodeBlockAsm12B
+	LEAL -1(SI), SI
+	DECL BX
+	JNZ  repeat_extend_back_loop_encodeBlockAsm12B
+
+repeat_extend_back_end_encodeBlockAsm12B:
+	MOVL 12(SP), BX
+	CMPL BX, SI
+	JEQ  emit_literal_done_repeat_emit_encodeBlockAsm12B
+	MOVL SI, R8
+	MOVL SI, 12(SP)
+	LEAQ (DX)(BX*1), R9
+	SUBL BX, R8
+	LEAL -1(R8), BX
+	CMPL BX, $0x3c
+	JB   one_byte_repeat_emit_encodeBlockAsm12B
+	CMPL BX, $0x00000100
+	JB   two_bytes_repeat_emit_encodeBlockAsm12B
+	JB   three_bytes_repeat_emit_encodeBlockAsm12B
+
+three_bytes_repeat_emit_encodeBlockAsm12B:
+	MOVB $0xf4, (AX)
+	MOVW BX, 1(AX)
+	ADDQ $0x03, AX
+	JMP  memmove_long_repeat_emit_encodeBlockAsm12B
+
+two_bytes_repeat_emit_encodeBlockAsm12B:
+	MOVB $0xf0, (AX)
+	MOVB BL, 1(AX)
+	ADDQ $0x02, AX
+	CMPL BX, $0x40
+	JB   memmove_repeat_emit_encodeBlockAsm12B
+	JMP  memmove_long_repeat_emit_encodeBlockAsm12B
+
+one_byte_repeat_emit_encodeBlockAsm12B:
+	SHLB $0x02, BL
+	MOVB BL, (AX)
+	ADDQ $0x01, AX
+
+memmove_repeat_emit_encodeBlockAsm12B:
+	LEAQ (AX)(R8*1), BX
+
+	// genMemMoveShort
+	CMPQ R8, $0x08
+	JBE  emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8
+	CMPQ R8, $0x10
+	JBE  emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16
+	CMPQ R8, $0x20
+	JBE  emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32
+	JMP  emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8:
+	MOVQ (R9), R10
+	MOVQ R10, (AX)
+	JMP  memmove_end_copy_repeat_emit_encodeBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_8through16:
+	MOVQ (R9), R10
+	MOVQ -8(R9)(R8*1), R9
+	MOVQ R10, (AX)
+	MOVQ R9, -8(AX)(R8*1)
+	JMP  memmove_end_copy_repeat_emit_encodeBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_17through32:
+	MOVOU (R9), X0
+	MOVOU -16(R9)(R8*1), X1
+	MOVOU X0, (AX)
+	MOVOU X1, -16(AX)(R8*1)
+	JMP   memmove_end_copy_repeat_emit_encodeBlockAsm12B
+
+emit_lit_memmove_repeat_emit_encodeBlockAsm12B_memmove_move_33through64:
+	MOVOU (R9), X0
+	MOVOU 16(R9), X1
+	MOVOU -32(R9)(R8*1), X2
+	MOVOU -16(R9)(R8*1), X3
+	MOVOU X0, (AX)
+	MOVOU X1, 16(AX)
+	MOVOU X2, -32(AX)(R8*1)
+	MOVOU X3, -16(AX)(R8*1)
+
+memmove_end_copy_repeat_emit_encodeBlockAsm12B:
+	MOVQ BX, AX
+	JMP  emit_literal_done_repeat_emit_encodeBlockAsm12B
+
+memmove_long_repeat_emit_encodeBlockAsm12B:
+	LEAQ (AX)(R8*1), BX
+
+	// genMemMoveLong
+	MOVOU (R9), X0
+	MOVOU 16(R9), X1
+	MOVOU -32(R9)(R8*1), X2
+	MOVOU -16(R9)(R8*1), X3
+	MOVQ  R8, R11
+	SHRQ  $0x05, R11
+	MOVQ  AX, R10
+	ANDL  $0x0000001f, R10
+	MOVQ  $0x00000040, R12
+	SUBQ  R10, R12
+	DECQ  R11
+	JA    emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
+	LEAQ  -32(R9)(R12*1), R10
+	LEAQ  -32(AX)(R12*1), R13
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back:
+	MOVOU (R10), X4
+	MOVOU 16(R10), X5
+	MOVOA X4, (R13)
+	MOVOA X5, 16(R13)
+	ADDQ  $0x20, R13
+	ADDQ  $0x20, R10
+	ADDQ  $0x20, R12
+	DECQ  R11
+	JNA   emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_big_loop_back
+
+emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32:
+	MOVOU -32(R9)(R12*1), X4
+	MOVOU -16(R9)(R12*1), X5
+	MOVOA X4, -32(AX)(R12*1)
+	MOVOA X5, -16(AX)(R12*1)
+	ADDQ  $0x20, R12
+	CMPQ  R8, R12
+	JAE   emit_lit_memmove_long_repeat_emit_encodeBlockAsm12Blarge_forward_sse_loop_32
+	MOVOU X0, (AX)
+	MOVOU X1, 16(AX)
+	MOVOU X2, -32(AX)(R8*1)
+	MOVOU X3, -16(AX)(R8*1)
+	MOVQ  BX, AX
+
+emit_literal_done_repeat_emit_encodeBlockAsm12B:
+	ADDL $0x05, CX
+	MOVL CX, BX
+	SUBL 16(SP), BX
+	MOVQ src_len+32(FP), R8
+	SUBL CX, R8
+	LEAQ (DX)(CX*1), R9
+	LEAQ (DX)(BX*1), BX
+
+	// matchLen
+	XORL R11, R11
+	CMPL R8, $0x08
+	JB   matchlen_match4_repeat_extend_encodeBlockAsm12B
+
+matchlen_loopback_repeat_extend_encodeBlockAsm12B:
+	MOVQ  (R9)(R11*1), R10
+	XORQ  (BX)(R11*1), R10
+	TESTQ R10, R10
+	JZ    matchlen_loop_repeat_extend_encodeBlockAsm12B
+
+#ifdef GOAMD64_v3
+	TZCNTQ R10, R10
+
+#else
+	BSFQ R10, R10
+
+#endif
+	SARQ $0x03, R10
+	LEAL (R11)(R10*1), R11
+	JMP  repeat_extend_forward_end_encodeBlockAsm12B
+
+matchlen_loop_repeat_extend_encodeBlockAsm12B:
+	LEAL -8(R8), R8
+	LEAL 8(R11), R11
+	CMPL R8, $0x08
+	JAE  matchlen_loopback_repeat_extend_encodeBlockAsm12B
+
+matchlen_match4_repeat_extend_encodeBlockAsm12B:
+	CMPL R8, $0x04
+	JB   matchlen_match2_repeat_extend_encodeBlockAsm12B
+	MOVL (R9)(R11*1), R10
+	CMPL (BX)(R11*1), R10
+	JNE  matchlen_match2_repeat_extend_encodeBlockAsm12B
+	LEAL -4(R8), R8
+	LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeBlockAsm12B:
+	CMPL R8, $0x01
+	JE   matchlen_match1_repeat_extend_encodeBlockAsm12B
+	JB   repeat_extend_forward_end_encodeBlockAsm12B
+	MOVW (R9)(R11*1), R10
+	CMPW (BX)(R11*1), R10
+	JNE  matchlen_match1_repeat_extend_encodeBlockAsm12B
+	LEAL 2(R11), R11
+	SUBL $0x02, R8
+	JZ   repeat_extend_forward_end_encodeBlockAsm12B
+
+matchlen_match1_repeat_extend_encodeBlockAsm12B:
+	MOVB (R9)(R11*1), R10
+	CMPB (BX)(R11*1), R10
+	JNE  repeat_extend_forward_end_encodeBlockAsm12B
+	LEAL 1(R11), R11
+
+repeat_extend_forward_end_encodeBlockAsm12B:
+	ADDL  R11, CX
+	MOVL  CX, BX
+	SUBL  SI, BX
+	MOVL  16(SP), SI
+	TESTL DI, DI
+	JZ    repeat_as_copy_encodeBlockAsm12B
+
// emitRepeat + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm12B + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm12B + CMPL SI, $0x00000800 + JB repeat_two_offset_match_repeat_encodeBlockAsm12B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm12B: + CMPL BX, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm12B + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_three_match_repeat_encodeBlockAsm12B: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_match_repeat_encodeBlockAsm12B: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_offset_match_repeat_encodeBlockAsm12B: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_as_copy_encodeBlockAsm12B: + // emitCopy + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B + CMPL SI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm12B + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + SUBL $0x08, BX + + // emitRepeat + LEAL -4(BX), BX + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + CMPL SI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short_2b: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +long_offset_short_repeat_as_copy_encodeBlockAsm12B: + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + + // emitRepeat + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + CMPL SI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_three_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + LEAL -4(BX), BX + 
MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm12B_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm12B: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B + CMPL SI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm12B + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm12B + +emit_copy_three_repeat_as_copy_encodeBlockAsm12B: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm12B: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm12B + +no_repeat_found_encodeBlockAsm12B: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBlockAsm12B + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeBlockAsm12B + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm12B + +candidate3_match_encodeBlockAsm12B: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm12B + +candidate2_match_encodeBlockAsm12B: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_encodeBlockAsm12B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBlockAsm12B + +match_extend_back_loop_encodeBlockAsm12B: + CMPL CX, SI + JBE match_extend_back_end_encodeBlockAsm12B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeBlockAsm12B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBlockAsm12B + JMP match_extend_back_loop_encodeBlockAsm12B + +match_extend_back_end_encodeBlockAsm12B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm12B: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeBlockAsm12B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeBlockAsm12B + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm12B + JB three_bytes_match_emit_encodeBlockAsm12B + +three_bytes_match_emit_encodeBlockAsm12B: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm12B + +two_bytes_match_emit_encodeBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeBlockAsm12B + JMP memmove_long_match_emit_encodeBlockAsm12B + +one_byte_match_emit_encodeBlockAsm12B: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm12B: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32 + JMP 
emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm12B + +emit_lit_memmove_match_emit_encodeBlockAsm12B_memmove_move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeBlockAsm12B: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeBlockAsm12B + +memmove_long_match_emit_encodeBlockAsm12B: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeBlockAsm12B: +match_nolit_loop_encodeBlockAsm12B: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm12B + +matchlen_loopback_match_nolit_encodeBlockAsm12B: + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 + JZ matchlen_loop_match_nolit_encodeBlockAsm12B + +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_encodeBlockAsm12B + +matchlen_loop_match_nolit_encodeBlockAsm12B: + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 + JAE matchlen_loopback_match_nolit_encodeBlockAsm12B + +matchlen_match4_match_nolit_encodeBlockAsm12B: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeBlockAsm12B + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeBlockAsm12B + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_encodeBlockAsm12B: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeBlockAsm12B + JB match_nolit_end_encodeBlockAsm12B + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeBlockAsm12B + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeBlockAsm12B + 
+matchlen_match1_match_nolit_encodeBlockAsm12B: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeBlockAsm12B + LEAL 1(R9), R9 + +match_nolit_end_encodeBlockAsm12B: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBlockAsm12B + CMPL BX, $0x00000800 + JAE long_offset_short_match_nolit_encodeBlockAsm12B + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + SUBL $0x08, R9 + + // emitRepeat + LEAL -4(R9), R9 + JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + CMPL BX, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b + LEAL -256(R9), R9 + MOVW $0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short_2b: + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +long_offset_short_match_nolit_encodeBlockAsm12B: + MOVB $0xee, (AX) + MOVW BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + + // emitRepeat + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short + CMPL BX, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short + LEAL -256(R9), R9 + MOVW $0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_three_match_nolit_encodeBlockAsm12B_emit_copy_short: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_match_nolit_encodeBlockAsm12B_emit_copy_short: + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +repeat_two_offset_match_nolit_encodeBlockAsm12B_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +two_byte_offset_short_match_nolit_encodeBlockAsm12B: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeBlockAsm12B + CMPL BX, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBlockAsm12B + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI 
+ MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm12B + +emit_copy_three_match_nolit_encodeBlockAsm12B: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm12B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeBlockAsm12B + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm12B: + MOVQ $0x000000cf1bbcdcbb, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x18, DI + IMULQ R8, DI + SHRQ $0x34, DI + SHLQ $0x18, BX + IMULQ R8, BX + SHRQ $0x34, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeBlockAsm12B + INCL CX + JMP search_loop_encodeBlockAsm12B + +emit_remainder_encodeBlockAsm12B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBlockAsm12B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm12B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBlockAsm12B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBlockAsm12B + JB three_bytes_emit_remainder_encodeBlockAsm12B + +three_bytes_emit_remainder_encodeBlockAsm12B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm12B + +two_bytes_emit_remainder_encodeBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBlockAsm12B + JMP memmove_long_emit_remainder_encodeBlockAsm12B + +one_byte_emit_remainder_encodeBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, 
-16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBlockAsm12B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm12B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm12B + +memmove_long_emit_remainder_encodeBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBlockAsm12B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBlockAsm10B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm10B(SB), $4120-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000020, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm10B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm10B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeBlockAsm10B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x9e3779b1, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + SHLQ $0x20, R10 + IMULQ R8, R10 + SHRQ $0x36, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeBlockAsm10B + LEAL 1(CX), SI + MOVL 12(SP), DI + MOVL SI, BX + SUBL 16(SP), BX + JZ repeat_extend_back_end_encodeBlockAsm10B + +repeat_extend_back_loop_encodeBlockAsm10B: + CMPL SI, DI + JBE repeat_extend_back_end_encodeBlockAsm10B + MOVB -1(DX)(BX*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeBlockAsm10B + LEAL -1(SI), SI + DECL BX + JNZ repeat_extend_back_loop_encodeBlockAsm10B + 
+repeat_extend_back_end_encodeBlockAsm10B: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm10B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm10B + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeBlockAsm10B + JB three_bytes_repeat_emit_encodeBlockAsm10B + +three_bytes_repeat_emit_encodeBlockAsm10B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeBlockAsm10B + +two_bytes_repeat_emit_encodeBlockAsm10B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeBlockAsm10B + JMP memmove_long_repeat_emit_encodeBlockAsm10B + +one_byte_repeat_emit_encodeBlockAsm10B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm10B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm10B: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm10B + +memmove_long_repeat_emit_encodeBlockAsm10B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeBlockAsm10B: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(BX*1), BX + + 
// matchLen + XORL R11, R11 + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm10B + +matchlen_loopback_repeat_extend_encodeBlockAsm10B: + MOVQ (R9)(R11*1), R10 + XORQ (BX)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_repeat_extend_encodeBlockAsm10B + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeBlockAsm10B + +matchlen_loop_repeat_extend_encodeBlockAsm10B: + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 + JAE matchlen_loopback_repeat_extend_encodeBlockAsm10B + +matchlen_match4_repeat_extend_encodeBlockAsm10B: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm10B + MOVL (R9)(R11*1), R10 + CMPL (BX)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeBlockAsm10B + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeBlockAsm10B: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm10B + JB repeat_extend_forward_end_encodeBlockAsm10B + MOVW (R9)(R11*1), R10 + CMPW (BX)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeBlockAsm10B + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_encodeBlockAsm10B + +matchlen_match1_repeat_extend_encodeBlockAsm10B: + MOVB (R9)(R11*1), R10 + CMPB (BX)(R11*1), R10 + JNE repeat_extend_forward_end_encodeBlockAsm10B + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeBlockAsm10B: + ADDL R11, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + TESTL DI, DI + JZ repeat_as_copy_encodeBlockAsm10B + + // emitRepeat + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm10B + CMPL DI, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm10B + CMPL SI, $0x00000800 + JB repeat_two_offset_match_repeat_encodeBlockAsm10B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm10B: + CMPL BX, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm10B + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_three_match_repeat_encodeBlockAsm10B: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_match_repeat_encodeBlockAsm10B: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_offset_match_repeat_encodeBlockAsm10B: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_as_copy_encodeBlockAsm10B: + // emitCopy + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B + CMPL SI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm10B + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + SUBL $0x08, BX + + // emitRepeat + LEAL -4(BX), BX + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + CMPL SI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: + CMPL BX, $0x00000104 + JB 
repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short_2b: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +long_offset_short_repeat_as_copy_encodeBlockAsm10B: + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + + // emitRepeat + MOVL BX, DI + LEAL -4(BX), BX + CMPL DI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + CMPL DI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + CMPL SI, $0x00000800 + JB repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_three_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +repeat_two_offset_repeat_as_copy_encodeBlockAsm10B_emit_copy_short: + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm10B: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B + CMPL SI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm10B + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm10B + +emit_copy_three_repeat_as_copy_encodeBlockAsm10B: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm10B: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm10B + +no_repeat_found_encodeBlockAsm10B: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBlockAsm10B + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeBlockAsm10B + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm10B + +candidate3_match_encodeBlockAsm10B: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm10B + +candidate2_match_encodeBlockAsm10B: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_encodeBlockAsm10B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBlockAsm10B + +match_extend_back_loop_encodeBlockAsm10B: + CMPL CX, SI + JBE match_extend_back_end_encodeBlockAsm10B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB 
DI, R8 + JNE match_extend_back_end_encodeBlockAsm10B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBlockAsm10B + JMP match_extend_back_loop_encodeBlockAsm10B + +match_extend_back_end_encodeBlockAsm10B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm10B: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeBlockAsm10B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeBlockAsm10B + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm10B + JB three_bytes_match_emit_encodeBlockAsm10B + +three_bytes_match_emit_encodeBlockAsm10B: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm10B + +two_bytes_match_emit_encodeBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeBlockAsm10B + JMP memmove_long_match_emit_encodeBlockAsm10B + +one_byte_match_emit_encodeBlockAsm10B: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm10B: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm10B + +emit_lit_memmove_match_emit_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeBlockAsm10B: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeBlockAsm10B + +memmove_long_match_emit_encodeBlockAsm10B: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE 
emit_lit_memmove_long_match_emit_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeBlockAsm10B: +match_nolit_loop_encodeBlockAsm10B: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm10B + +matchlen_loopback_match_nolit_encodeBlockAsm10B: + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 + JZ matchlen_loop_match_nolit_encodeBlockAsm10B + +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_encodeBlockAsm10B + +matchlen_loop_match_nolit_encodeBlockAsm10B: + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 + JAE matchlen_loopback_match_nolit_encodeBlockAsm10B + +matchlen_match4_match_nolit_encodeBlockAsm10B: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeBlockAsm10B + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeBlockAsm10B + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_encodeBlockAsm10B: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeBlockAsm10B + JB match_nolit_end_encodeBlockAsm10B + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeBlockAsm10B + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeBlockAsm10B + +matchlen_match1_match_nolit_encodeBlockAsm10B: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeBlockAsm10B + LEAL 1(R9), R9 + +match_nolit_end_encodeBlockAsm10B: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBlockAsm10B + CMPL BX, $0x00000800 + JAE long_offset_short_match_nolit_encodeBlockAsm10B + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + SUBL $0x08, R9 + + // emitRepeat + LEAL -4(R9), R9 + JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + CMPL BX, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b + LEAL -256(R9), R9 + MOVW $0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short_2b: + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +long_offset_short_match_nolit_encodeBlockAsm10B: + MOVB $0xee, (AX) + MOVW 
BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + + // emitRepeat + MOVL R9, SI + LEAL -4(R9), R9 + CMPL SI, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short + CMPL BX, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short + LEAL -256(R9), R9 + MOVW $0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_three_match_nolit_encodeBlockAsm10B_emit_copy_short: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_match_nolit_encodeBlockAsm10B_emit_copy_short: + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +repeat_two_offset_match_nolit_encodeBlockAsm10B_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +two_byte_offset_short_match_nolit_encodeBlockAsm10B: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeBlockAsm10B + CMPL BX, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBlockAsm10B + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm10B + +emit_copy_three_match_nolit_encodeBlockAsm10B: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm10B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeBlockAsm10B + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm10B: + MOVQ $0x9e3779b1, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x20, DI + IMULQ R8, DI + SHRQ $0x36, DI + SHLQ $0x20, BX + IMULQ R8, BX + SHRQ $0x36, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeBlockAsm10B + INCL CX + JMP search_loop_encodeBlockAsm10B + +emit_remainder_encodeBlockAsm10B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBlockAsm10B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm10B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBlockAsm10B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBlockAsm10B + JB three_bytes_emit_remainder_encodeBlockAsm10B + +three_bytes_emit_remainder_encodeBlockAsm10B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm10B + +two_bytes_emit_remainder_encodeBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBlockAsm10B + JMP memmove_long_emit_remainder_encodeBlockAsm10B + +one_byte_emit_remainder_encodeBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm10B: + LEAQ 
(AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBlockAsm10B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm10B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm10B + +memmove_long_emit_remainder_encodeBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBlockAsm10B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBlockAsm8B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBlockAsm8B(SB), $1048-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000008, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBlockAsm8B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 
32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBlockAsm8B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x04, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeBlockAsm8B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x9e3779b1, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x38, R9 + SHLQ $0x20, R10 + IMULQ R8, R10 + SHRQ $0x38, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x38, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeBlockAsm8B + LEAL 1(CX), SI + MOVL 12(SP), DI + MOVL SI, BX + SUBL 16(SP), BX + JZ repeat_extend_back_end_encodeBlockAsm8B + +repeat_extend_back_loop_encodeBlockAsm8B: + CMPL SI, DI + JBE repeat_extend_back_end_encodeBlockAsm8B + MOVB -1(DX)(BX*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeBlockAsm8B + LEAL -1(SI), SI + DECL BX + JNZ repeat_extend_back_loop_encodeBlockAsm8B + +repeat_extend_back_end_encodeBlockAsm8B: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeBlockAsm8B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeBlockAsm8B + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeBlockAsm8B + JB three_bytes_repeat_emit_encodeBlockAsm8B + +three_bytes_repeat_emit_encodeBlockAsm8B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeBlockAsm8B + +two_bytes_repeat_emit_encodeBlockAsm8B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeBlockAsm8B + JMP memmove_long_repeat_emit_encodeBlockAsm8B + +one_byte_repeat_emit_encodeBlockAsm8B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeBlockAsm8B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_repeat_emit_encodeBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeBlockAsm8B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU 
X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_repeat_emit_encodeBlockAsm8B: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeBlockAsm8B + +memmove_long_repeat_emit_encodeBlockAsm8B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R11 + SHRQ $0x05, R11 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R12 + SUBQ R10, R12 + DECQ R11 + JA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R12*1), R10 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R10 + ADDQ $0x20, R12 + DECQ R11 + JNA emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R12*1), X4 + MOVOU -16(R9)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ R8, R12 + JAE emit_lit_memmove_long_repeat_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeBlockAsm8B: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), R8 + SUBL CX, R8 + LEAQ (DX)(CX*1), R9 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R11, R11 + CMPL R8, $0x08 + JB matchlen_match4_repeat_extend_encodeBlockAsm8B + +matchlen_loopback_repeat_extend_encodeBlockAsm8B: + MOVQ (R9)(R11*1), R10 + XORQ (BX)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_repeat_extend_encodeBlockAsm8B + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP repeat_extend_forward_end_encodeBlockAsm8B + +matchlen_loop_repeat_extend_encodeBlockAsm8B: + LEAL -8(R8), R8 + LEAL 8(R11), R11 + CMPL R8, $0x08 + JAE matchlen_loopback_repeat_extend_encodeBlockAsm8B + +matchlen_match4_repeat_extend_encodeBlockAsm8B: + CMPL R8, $0x04 + JB matchlen_match2_repeat_extend_encodeBlockAsm8B + MOVL (R9)(R11*1), R10 + CMPL (BX)(R11*1), R10 + JNE matchlen_match2_repeat_extend_encodeBlockAsm8B + LEAL -4(R8), R8 + LEAL 4(R11), R11 + +matchlen_match2_repeat_extend_encodeBlockAsm8B: + CMPL R8, $0x01 + JE matchlen_match1_repeat_extend_encodeBlockAsm8B + JB repeat_extend_forward_end_encodeBlockAsm8B + MOVW (R9)(R11*1), R10 + CMPW (BX)(R11*1), R10 + JNE matchlen_match1_repeat_extend_encodeBlockAsm8B + LEAL 2(R11), R11 + SUBL $0x02, R8 + JZ repeat_extend_forward_end_encodeBlockAsm8B + +matchlen_match1_repeat_extend_encodeBlockAsm8B: + MOVB (R9)(R11*1), R10 + CMPB (BX)(R11*1), R10 + JNE repeat_extend_forward_end_encodeBlockAsm8B + LEAL 1(R11), R11 + +repeat_extend_forward_end_encodeBlockAsm8B: + ADDL R11, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + TESTL DI, DI + JZ repeat_as_copy_encodeBlockAsm8B + + // emitRepeat + MOVL BX, SI + LEAL -4(BX), BX + CMPL SI, $0x08 + JBE repeat_two_match_repeat_encodeBlockAsm8B + CMPL SI, $0x0c + JAE cant_repeat_two_offset_match_repeat_encodeBlockAsm8B + +cant_repeat_two_offset_match_repeat_encodeBlockAsm8B: + CMPL BX, $0x00000104 + JB repeat_three_match_repeat_encodeBlockAsm8B + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_three_match_repeat_encodeBlockAsm8B: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ 
$0x03, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_two_match_repeat_encodeBlockAsm8B: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_as_copy_encodeBlockAsm8B: + // emitCopy + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B + CMPL SI, $0x00000800 + JAE long_offset_short_repeat_as_copy_encodeBlockAsm8B + MOVL $0x00000001, DI + LEAL 16(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + SUBL $0x08, BX + + // emitRepeat + LEAL -4(BX), BX + JMP cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b + MOVL BX, SI + LEAL -4(BX), BX + CMPL SI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short_2b: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + +long_offset_short_repeat_as_copy_encodeBlockAsm8B: + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + + // emitRepeat + MOVL BX, SI + LEAL -4(BX), BX + CMPL SI, $0x08 + JBE repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + +cant_repeat_two_offset_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + CMPL BX, $0x00000104 + JB repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short + LEAL -256(BX), BX + MOVW $0x0019, (AX) + MOVW BX, 2(AX) + ADDQ $0x04, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_three_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + LEAL -4(BX), BX + MOVW $0x0015, (AX) + MOVB BL, 2(AX) + ADDQ $0x03, AX + JMP repeat_end_emit_encodeBlockAsm8B + +repeat_two_repeat_as_copy_encodeBlockAsm8B_emit_copy_short: + SHLL $0x02, BX + ORL $0x01, BX + MOVW BX, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + XORQ DI, DI + LEAL 1(DI)(BX*4), BX + MOVB SI, 1(AX) + SARL $0x08, SI + SHLL $0x05, SI + ORL SI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + +two_byte_offset_short_repeat_as_copy_encodeBlockAsm8B: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeBlockAsm8B + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeBlockAsm8B + +emit_copy_three_repeat_as_copy_encodeBlockAsm8B: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeBlockAsm8B: + MOVL CX, 12(SP) + JMP search_loop_encodeBlockAsm8B + 
+no_repeat_found_encodeBlockAsm8B: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBlockAsm8B + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeBlockAsm8B + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeBlockAsm8B + +candidate3_match_encodeBlockAsm8B: + ADDL $0x02, CX + JMP candidate_match_encodeBlockAsm8B + +candidate2_match_encodeBlockAsm8B: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_encodeBlockAsm8B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBlockAsm8B + +match_extend_back_loop_encodeBlockAsm8B: + CMPL CX, SI + JBE match_extend_back_end_encodeBlockAsm8B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeBlockAsm8B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBlockAsm8B + JMP match_extend_back_loop_encodeBlockAsm8B + +match_extend_back_end_encodeBlockAsm8B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBlockAsm8B: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeBlockAsm8B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeBlockAsm8B + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeBlockAsm8B + JB three_bytes_match_emit_encodeBlockAsm8B + +three_bytes_match_emit_encodeBlockAsm8B: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBlockAsm8B + +two_bytes_match_emit_encodeBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeBlockAsm8B + JMP memmove_long_match_emit_encodeBlockAsm8B + +one_byte_match_emit_encodeBlockAsm8B: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBlockAsm8B: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBlockAsm8B + +emit_lit_memmove_match_emit_encodeBlockAsm8B_memmove_move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeBlockAsm8B: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeBlockAsm8B + +memmove_long_match_emit_encodeBlockAsm8B: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + 
MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE emit_lit_memmove_long_match_emit_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeBlockAsm8B: +match_nolit_loop_encodeBlockAsm8B: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeBlockAsm8B + +matchlen_loopback_match_nolit_encodeBlockAsm8B: + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 + JZ matchlen_loop_match_nolit_encodeBlockAsm8B + +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_encodeBlockAsm8B + +matchlen_loop_match_nolit_encodeBlockAsm8B: + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 + JAE matchlen_loopback_match_nolit_encodeBlockAsm8B + +matchlen_match4_match_nolit_encodeBlockAsm8B: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeBlockAsm8B + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeBlockAsm8B + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_encodeBlockAsm8B: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeBlockAsm8B + JB match_nolit_end_encodeBlockAsm8B + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeBlockAsm8B + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeBlockAsm8B + +matchlen_match1_match_nolit_encodeBlockAsm8B: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeBlockAsm8B + LEAL 1(R9), R9 + +match_nolit_end_encodeBlockAsm8B: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBlockAsm8B + CMPL BX, $0x00000800 + JAE long_offset_short_match_nolit_encodeBlockAsm8B + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + SUBL $0x08, R9 + + // emitRepeat + LEAL -4(R9), R9 + JMP cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b + MOVL R9, BX + LEAL -4(R9), R9 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short_2b: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b + LEAL -256(R9), R9 + MOVW $0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + 
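+	// The repeat_* blocks below emit S2 repeat codes: length-only copies
+	// that implicitly reuse the previous offset. Tag word $0x0015 is
+	// followed by a one-byte length and $0x0019 by a two-byte length, while
+	// the shortest form packs length<<2|1 into a single 16-bit store.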
+repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short_2b: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short_2b: + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +long_offset_short_match_nolit_encodeBlockAsm8B: + MOVB $0xee, (AX) + MOVW BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + + // emitRepeat + MOVL R9, BX + LEAL -4(R9), R9 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBlockAsm8B_emit_copy_short: + CMPL R9, $0x00000104 + JB repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short + LEAL -256(R9), R9 + MOVW $0x0019, (AX) + MOVW R9, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +repeat_three_match_nolit_encodeBlockAsm8B_emit_copy_short: + LEAL -4(R9), R9 + MOVW $0x0015, (AX) + MOVB R9, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +repeat_two_match_nolit_encodeBlockAsm8B_emit_copy_short: + SHLL $0x02, R9 + ORL $0x01, R9 + MOVW R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + XORQ SI, SI + LEAL 1(SI)(R9*4), R9 + MOVB BL, 1(AX) + SARL $0x08, BX + SHLL $0x05, BX + ORL BX, R9 + MOVB R9, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +two_byte_offset_short_match_nolit_encodeBlockAsm8B: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeBlockAsm8B + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBlockAsm8B + +emit_copy_three_match_nolit_encodeBlockAsm8B: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeBlockAsm8B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeBlockAsm8B + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBlockAsm8B: + MOVQ $0x9e3779b1, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x20, DI + IMULQ R8, DI + SHRQ $0x38, DI + SHLQ $0x20, BX + IMULQ R8, BX + SHRQ $0x38, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeBlockAsm8B + INCL CX + JMP search_loop_encodeBlockAsm8B + +emit_remainder_encodeBlockAsm8B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBlockAsm8B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBlockAsm8B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBlockAsm8B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBlockAsm8B + JB three_bytes_emit_remainder_encodeBlockAsm8B + +three_bytes_emit_remainder_encodeBlockAsm8B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBlockAsm8B + 
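+	// Literal tags per the Snappy block format: lengths up to 60 encode as
+	// (len-1)<<2 in the tag byte itself, tag 0xf0 appends one extra length
+	// byte, and 0xf4 appends two.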
+two_bytes_emit_remainder_encodeBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBlockAsm8B + JMP memmove_long_emit_remainder_encodeBlockAsm8B + +one_byte_emit_remainder_encodeBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBlockAsm8B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBlockAsm8B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBlockAsm8B + +memmove_long_emit_remainder_encodeBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBlockAsm8B: + MOVQ 
dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm(SB), $589848-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00001200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x07, BX + CMPL BX, $0x63 + JBE check_maxskip_ok_encodeBetterBlockAsm + LEAL 100(CX), BX + JMP check_maxskip_cont_encodeBetterBlockAsm + +check_maxskip_ok_encodeBetterBlockAsm: + LEAL 1(CX)(BX*1), BX + +check_maxskip_cont_encodeBetterBlockAsm: + CMPL BX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 524312(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 524312(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeBetterBlockAsm + CMPQ R10, SI + JNE no_short_found_encodeBetterBlockAsm + MOVL DI, BX + JMP candidate_match_encodeBetterBlockAsm + +no_short_found_encodeBetterBlockAsm: + CMPL R9, SI + JEQ candidate_match_encodeBetterBlockAsm + CMPL R10, SI + JEQ candidateS_match_encodeBetterBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm + +candidateS_match_encodeBetterBlockAsm: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBetterBlockAsm + DECL CX + MOVL DI, BX + +candidate_match_encodeBetterBlockAsm: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBetterBlockAsm + +match_extend_back_loop_encodeBetterBlockAsm: + CMPL CX, SI + JBE match_extend_back_end_encodeBetterBlockAsm + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeBetterBlockAsm + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBetterBlockAsm + JMP match_extend_back_loop_encodeBetterBlockAsm + +match_extend_back_end_encodeBetterBlockAsm: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 5(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBetterBlockAsm + +matchlen_loopback_match_nolit_encodeBetterBlockAsm: + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_match_nolit_encodeBetterBlockAsm + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeBetterBlockAsm + +matchlen_loop_match_nolit_encodeBetterBlockAsm: + LEAL -8(DI), DI + LEAL 8(R11), R11 + 
CMPL DI, $0x08 + JAE matchlen_loopback_match_nolit_encodeBetterBlockAsm + +matchlen_match4_match_nolit_encodeBetterBlockAsm: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeBetterBlockAsm: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm + JB match_nolit_end_encodeBetterBlockAsm + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeBetterBlockAsm + +matchlen_match1_match_nolit_encodeBetterBlockAsm: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeBetterBlockAsm + LEAL 1(R11), R11 + +match_nolit_end_encodeBetterBlockAsm: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + CMPL 16(SP), DI + JEQ match_is_repeat_encodeBetterBlockAsm + CMPL R11, $0x01 + JA match_length_ok_encodeBetterBlockAsm + CMPL DI, $0x0000ffff + JBE match_length_ok_encodeBetterBlockAsm + MOVL 20(SP), CX + INCL CX + JMP search_loop_encodeBetterBlockAsm + +match_length_ok_encodeBetterBlockAsm: + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm + CMPL BX, $0x00010000 + JB three_bytes_match_emit_encodeBetterBlockAsm + CMPL BX, $0x01000000 + JB four_bytes_match_emit_encodeBetterBlockAsm + MOVB $0xfc, (AX) + MOVL BX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm + +four_bytes_match_emit_encodeBetterBlockAsm: + MOVL BX, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm + +three_bytes_match_emit_encodeBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm + +two_bytes_match_emit_encodeBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm + JMP memmove_long_match_emit_encodeBetterBlockAsm + +one_byte_match_emit_encodeBetterBlockAsm: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP 
memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_encodeBetterBlockAsm_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm + +memmove_long_match_emit_encodeBetterBlockAsm: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy + CMPL DI, $0x00010000 + JB two_byte_offset_match_nolit_encodeBetterBlockAsm + CMPL R11, $0x40 + JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm + MOVB $0xff, (AX) + MOVL DI, 1(AX) + LEAL -64(R11), R11 + ADDQ $0x05, AX + CMPL R11, $0x04 + JB four_bytes_remain_match_nolit_encodeBetterBlockAsm + + // emitRepeat +emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy: + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL R11, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy + CMPL R11, $0x0100ffff + JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy + LEAL -16842747(R11), R11 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy + +repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy: + LEAL -65536(R11), R11 + MOVL R11, DI + MOVW $0x001d, (AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy: + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy: 
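+	// Three-byte repeat form: the running length (already biased by -4) is
+	// biased by 4 more, then stored as the byte following tag word $0x0015.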
+ LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +four_bytes_remain_match_nolit_encodeBetterBlockAsm: + TESTL R11, R11 + JZ match_nolit_emitcopy_end_encodeBetterBlockAsm + XORL BX, BX + LEAL -1(BX)(R11*4), R11 + MOVB R11, (AX) + MOVL DI, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +two_byte_offset_match_nolit_encodeBetterBlockAsm: + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm + CMPL DI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm + MOVL $0x00000001, BX + LEAL 16(BX), BX + MOVB DI, 1(AX) + MOVL DI, R8 + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, BX + MOVB BL, (AX) + ADDQ $0x02, AX + SUBL $0x08, R11 + + // emitRepeat + LEAL -4(R11), R11 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + +emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + CMPL R11, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + CMPL R11, $0x0100ffff + JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + LEAL -16842747(R11), R11 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b + +repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + LEAL -65536(R11), R11 + MOVL R11, DI + MOVW $0x001d, (AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short_2b: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +long_offset_short_match_nolit_encodeBetterBlockAsm: + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + + // emitRepeat 
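+	// Very long matches loop here: each pass emits a maximal five-byte
+	// repeat code (tag 0x1d with a 24-bit length) and subtracts its
+	// coverage until the remainder fits one of the shorter forms below.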
+emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short: + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL R11, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short + CMPL R11, $0x0100ffff + JB repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short + LEAL -16842747(R11), R11 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_encodeBetterBlockAsm_emit_copy_short + +repeat_five_match_nolit_encodeBetterBlockAsm_emit_copy_short: + LEAL -65536(R11), R11 + MOVL R11, DI + MOVW $0x001d, (AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_encodeBetterBlockAsm_emit_copy_short: + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_encodeBetterBlockAsm_emit_copy_short: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_encodeBetterBlockAsm_emit_copy_short: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_encodeBetterBlockAsm_emit_copy_short: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm + CMPL DI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +emit_copy_three_match_nolit_encodeBetterBlockAsm: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +match_is_repeat_encodeBetterBlockAsm: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm + CMPL BX, $0x00000100 + JB two_bytes_match_emit_repeat_encodeBetterBlockAsm + CMPL BX, $0x00010000 + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm + CMPL BX, $0x01000000 + JB four_bytes_match_emit_repeat_encodeBetterBlockAsm + MOVB $0xfc, (AX) + MOVL BX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +four_bytes_match_emit_repeat_encodeBetterBlockAsm: + MOVL BX, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +three_bytes_match_emit_repeat_encodeBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP 
memmove_long_match_emit_repeat_encodeBetterBlockAsm + +two_bytes_match_emit_repeat_encodeBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm + +one_byte_match_emit_repeat_encodeBetterBlockAsm: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm: + MOVQ BX, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm + +memmove_long_match_emit_repeat_encodeBetterBlockAsm: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitRepeat 
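+	// This match reused the previous copy offset (the "Check if repeat"
+	// test above), so no explicit offset is written here: the entire match
+	// is encoded with repeat codes alone.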
+emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm: + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm + CMPL R11, $0x00010100 + JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm + CMPL R11, $0x0100ffff + JB repeat_five_match_nolit_repeat_encodeBetterBlockAsm + LEAL -16842747(R11), R11 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_match_nolit_repeat_encodeBetterBlockAsm + +repeat_five_match_nolit_repeat_encodeBetterBlockAsm: + LEAL -65536(R11), R11 + MOVL R11, DI + MOVW $0x001d, (AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_four_match_nolit_repeat_encodeBetterBlockAsm: + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm: + CMPL CX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm: + MOVQ $0x00cf1bbcdcbfa563, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x32, R10 + SHLQ $0x08, R11 + IMULQ BX, R11 + SHRQ $0x2f, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x32, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 524312(SP)(R10*4) + MOVL R13, 524312(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeBetterBlockAsm: + CMPQ DI, R8 + JAE search_loop_encodeBetterBlockAsm + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + SHLQ $0x08, R10 + IMULQ BX, R10 + SHRQ $0x2f, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeBetterBlockAsm + +emit_remainder_encodeBetterBlockAsm: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 5(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm + CMPL DX, $0x00000100 + JB 
two_bytes_emit_remainder_encodeBetterBlockAsm + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeBetterBlockAsm + CMPL DX, $0x01000000 + JB four_bytes_emit_remainder_encodeBetterBlockAsm + MOVB $0xfc, (AX) + MOVL DX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +four_bytes_emit_remainder_encodeBetterBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +three_bytes_emit_remainder_encodeBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +two_bytes_emit_remainder_encodeBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm + JMP memmove_long_emit_remainder_encodeBetterBlockAsm + +one_byte_emit_remainder_encodeBetterBlockAsm: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm + +memmove_long_emit_remainder_encodeBetterBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ 
-32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm4MB(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm4MB(SB), $589848-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00001200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm4MB: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm4MB + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm4MB: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x07, BX + CMPL BX, $0x63 + JBE check_maxskip_ok_encodeBetterBlockAsm4MB + LEAL 100(CX), BX + JMP check_maxskip_cont_encodeBetterBlockAsm4MB + +check_maxskip_ok_encodeBetterBlockAsm4MB: + LEAL 1(CX)(BX*1), BX + +check_maxskip_cont_encodeBetterBlockAsm4MB: + CMPL BX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm4MB + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 524312(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 524312(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeBetterBlockAsm4MB + CMPQ R10, SI + JNE no_short_found_encodeBetterBlockAsm4MB + MOVL DI, BX + JMP candidate_match_encodeBetterBlockAsm4MB + +no_short_found_encodeBetterBlockAsm4MB: + CMPL R9, SI + JEQ candidate_match_encodeBetterBlockAsm4MB + CMPL R10, SI + JEQ candidateS_match_encodeBetterBlockAsm4MB + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm4MB + +candidateS_match_encodeBetterBlockAsm4MB: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBetterBlockAsm4MB + DECL CX + MOVL DI, BX + +candidate_match_encodeBetterBlockAsm4MB: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBetterBlockAsm4MB + +match_extend_back_loop_encodeBetterBlockAsm4MB: + CMPL CX, SI + JBE match_extend_back_end_encodeBetterBlockAsm4MB + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeBetterBlockAsm4MB + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBetterBlockAsm4MB + JMP match_extend_back_loop_encodeBetterBlockAsm4MB + 
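+	// After the backwards match extension above, the encoder verifies that
+	// the pending literals plus a worst-case tag still fit in dst; if not,
+	// it returns 0 to signal that the destination buffer is too small.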
+match_extend_back_end_encodeBetterBlockAsm4MB: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 4(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBetterBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm4MB: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBetterBlockAsm4MB + +matchlen_loopback_match_nolit_encodeBetterBlockAsm4MB: + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_match_nolit_encodeBetterBlockAsm4MB + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeBetterBlockAsm4MB + +matchlen_loop_match_nolit_encodeBetterBlockAsm4MB: + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 + JAE matchlen_loopback_match_nolit_encodeBetterBlockAsm4MB + +matchlen_match4_match_nolit_encodeBetterBlockAsm4MB: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm4MB + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm4MB + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeBetterBlockAsm4MB: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB + JB match_nolit_end_encodeBetterBlockAsm4MB + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeBetterBlockAsm4MB + +matchlen_match1_match_nolit_encodeBetterBlockAsm4MB: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeBetterBlockAsm4MB + LEAL 1(R11), R11 + +match_nolit_end_encodeBetterBlockAsm4MB: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + CMPL 16(SP), DI + JEQ match_is_repeat_encodeBetterBlockAsm4MB + CMPL R11, $0x01 + JA match_length_ok_encodeBetterBlockAsm4MB + CMPL DI, $0x0000ffff + JBE match_length_ok_encodeBetterBlockAsm4MB + MOVL 20(SP), CX + INCL CX + JMP search_loop_encodeBetterBlockAsm4MB + +match_length_ok_encodeBetterBlockAsm4MB: + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm4MB + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm4MB + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm4MB + CMPL BX, $0x00010000 + JB three_bytes_match_emit_encodeBetterBlockAsm4MB + MOVL BX, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm4MB + +three_bytes_match_emit_encodeBetterBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm4MB + +two_bytes_match_emit_encodeBetterBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm4MB + JMP memmove_long_match_emit_encodeBetterBlockAsm4MB + +one_byte_match_emit_encodeBetterBlockAsm4MB: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm4MB: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7 + CMPQ R8, $0x10 + 
JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_encodeBetterBlockAsm4MB_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm4MB: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm4MB + +memmove_long_match_emit_encodeBetterBlockAsm4MB: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm4MB: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy + CMPL DI, $0x00010000 + JB two_byte_offset_match_nolit_encodeBetterBlockAsm4MB + CMPL R11, $0x40 + JBE four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB + MOVB $0xff, (AX) + MOVL DI, 1(AX) + LEAL -64(R11), R11 + ADDQ $0x05, AX + CMPL R11, $0x04 + JB four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy 
+ CMPL R11, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy + LEAL -65536(R11), R11 + MOVL R11, DI + MOVW $0x001d, (AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +four_bytes_remain_match_nolit_encodeBetterBlockAsm4MB: + TESTL R11, R11 + JZ match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + XORL BX, BX + LEAL -1(BX)(R11*4), R11 + MOVB R11, (AX) + MOVL DI, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +two_byte_offset_match_nolit_encodeBetterBlockAsm4MB: + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB + CMPL DI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm4MB + MOVL $0x00000001, BX + LEAL 16(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + SUBL $0x08, R11 + + // emitRepeat + LEAL -4(R11), R11 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + CMPL R11, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b + LEAL -65536(R11), R11 + MOVL R11, DI + MOVW $0x001d, (AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short_2b: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + 
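+	// Long-offset short copy: tag 0xee encodes a copy of length 60 with a
+	// two-byte offset; any remaining length is then covered by repeat
+	// codes, which are cheaper than further explicit-offset copies.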
+long_offset_short_match_nolit_encodeBetterBlockAsm4MB: + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + CMPL R11, $0x00010100 + JB repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short + LEAL -65536(R11), R11 + MOVL R11, DI + MOVW $0x001d, (AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_encodeBetterBlockAsm4MB_emit_copy_short: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm4MB: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB + CMPL DI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm4MB + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +emit_copy_three_match_nolit_encodeBetterBlockAsm4MB: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +match_is_repeat_encodeBetterBlockAsm4MB: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm4MB + CMPL BX, $0x00000100 + JB two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB + CMPL BX, $0x00010000 + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB + MOVL BX, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB + +three_bytes_match_emit_repeat_encodeBetterBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB + +two_bytes_match_emit_repeat_encodeBetterBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm4MB + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB + +one_byte_match_emit_repeat_encodeBetterBlockAsm4MB: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + 
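+	// genMemMoveShort: literal runs up to 64 bytes are dispatched below by
+	// size class (<=4, 5-7, 8-16, 17-32, 33-64) and copied with a pair of
+	// possibly overlapping loads and stores instead of a byte loop.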
+memmove_match_emit_repeat_encodeBetterBlockAsm4MB: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm4MB_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm4MB: + MOVQ BX, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB + +memmove_long_match_emit_repeat_encodeBetterBlockAsm4MB: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm4MB: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB + 
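+	// Offsets below 2048 with short lengths take repeat_two_offset_* below:
+	// an ordinary two-byte copy1 tag (offset>>8 folded into bits 5-7, low
+	// offset byte following) that is no larger than a repeat code.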
+cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB + CMPL R11, $0x00010100 + JB repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB + LEAL -65536(R11), R11 + MOVL R11, DI + MOVW $0x001d, (AX) + MOVW R11, 2(AX) + SARL $0x10, DI + MOVB DI, 4(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_four_match_nolit_repeat_encodeBetterBlockAsm4MB: + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm4MB: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm4MB: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm4MB + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm4MB: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm4MB: + CMPL CX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm4MB + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeBetterBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm4MB: + MOVQ $0x00cf1bbcdcbfa563, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x32, R10 + SHLQ $0x08, R11 + IMULQ BX, R11 + SHRQ $0x2f, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x32, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 524312(SP)(R10*4) + MOVL R13, 524312(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeBetterBlockAsm4MB: + CMPQ DI, R8 + JAE search_loop_encodeBetterBlockAsm4MB + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + SHLQ $0x08, R10 + IMULQ BX, R10 + SHRQ $0x2f, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeBetterBlockAsm4MB + +emit_remainder_encodeBetterBlockAsm4MB: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 4(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm4MB + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm4MB: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm4MB + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBetterBlockAsm4MB + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeBetterBlockAsm4MB + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB + +three_bytes_emit_remainder_encodeBetterBlockAsm4MB: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB + +two_bytes_emit_remainder_encodeBetterBlockAsm4MB: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, 
$0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm4MB + JMP memmove_long_emit_remainder_encodeBetterBlockAsm4MB + +one_byte_emit_remainder_encodeBetterBlockAsm4MB: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm4MB: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm4MB_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm4MB: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB + +memmove_long_emit_remainder_encodeBetterBlockAsm4MB: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm4MBlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, 
-16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm12B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm12B(SB), $81944-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000280, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm12B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm12B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x06, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm12B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x34, R10 + MOVL 24(SP)(R9*4), BX + MOVL 65560(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 65560(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeBetterBlockAsm12B + CMPQ R10, SI + JNE no_short_found_encodeBetterBlockAsm12B + MOVL DI, BX + JMP candidate_match_encodeBetterBlockAsm12B + +no_short_found_encodeBetterBlockAsm12B: + CMPL R9, SI + JEQ candidate_match_encodeBetterBlockAsm12B + CMPL R10, SI + JEQ candidateS_match_encodeBetterBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm12B + +candidateS_match_encodeBetterBlockAsm12B: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBetterBlockAsm12B + DECL CX + MOVL DI, BX + +candidate_match_encodeBetterBlockAsm12B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBetterBlockAsm12B + +match_extend_back_loop_encodeBetterBlockAsm12B: + CMPL CX, SI + JBE match_extend_back_end_encodeBetterBlockAsm12B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeBetterBlockAsm12B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBetterBlockAsm12B + JMP match_extend_back_loop_encodeBetterBlockAsm12B + +match_extend_back_end_encodeBetterBlockAsm12B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm12B: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBetterBlockAsm12B + +matchlen_loopback_match_nolit_encodeBetterBlockAsm12B: + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_match_nolit_encodeBetterBlockAsm12B + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeBetterBlockAsm12B + +matchlen_loop_match_nolit_encodeBetterBlockAsm12B: + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 + JAE 
matchlen_loopback_match_nolit_encodeBetterBlockAsm12B + +matchlen_match4_match_nolit_encodeBetterBlockAsm12B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm12B + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm12B + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeBetterBlockAsm12B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm12B + JB match_nolit_end_encodeBetterBlockAsm12B + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm12B + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeBetterBlockAsm12B + +matchlen_match1_match_nolit_encodeBetterBlockAsm12B: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeBetterBlockAsm12B + LEAL 1(R11), R11 + +match_nolit_end_encodeBetterBlockAsm12B: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + CMPL 16(SP), DI + JEQ match_is_repeat_encodeBetterBlockAsm12B + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm12B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm12B + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm12B + JB three_bytes_match_emit_encodeBetterBlockAsm12B + +three_bytes_match_emit_encodeBetterBlockAsm12B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm12B + +two_bytes_match_emit_encodeBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm12B + JMP memmove_long_match_emit_encodeBetterBlockAsm12B + +one_byte_match_emit_encodeBetterBlockAsm12B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm12B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + 
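+
+ // Each genMemMoveShort class above covers its size range with exactly two
+ // fixed-width moves, one anchored at the start and one at the end of the
+ // buffer; for lengths strictly inside the range the two moves overlap in
+ // the middle, which is harmless. The 8-16 byte class, roughly, in Go:
+ //
+ //	a := binary.LittleEndian.Uint64(src)
+ //	b := binary.LittleEndian.Uint64(src[n-8:])
+ //	binary.LittleEndian.PutUint64(dst, a)
+ //	binary.LittleEndian.PutUint64(dst[n-8:], b)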
+memmove_end_copy_match_emit_encodeBetterBlockAsm12B: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm12B + +memmove_long_match_emit_encodeBetterBlockAsm12B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm12B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B + CMPL DI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm12B + MOVL $0x00000001, BX + LEAL 16(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + SUBL $0x08, R11 + + // emitRepeat + LEAL -4(R11), R11 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short_2b: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +long_offset_short_match_nolit_encodeBetterBlockAsm12B: + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + CMPL DI, 
$0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_three_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm12B_emit_copy_short: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm12B: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B + CMPL DI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm12B + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +emit_copy_three_match_nolit_encodeBetterBlockAsm12B: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +match_is_repeat_encodeBetterBlockAsm12B: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm12B + CMPL BX, $0x00000100 + JB two_bytes_match_emit_repeat_encodeBetterBlockAsm12B + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm12B + +three_bytes_match_emit_repeat_encodeBetterBlockAsm12B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm12B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm12B + +one_byte_match_emit_repeat_encodeBetterBlockAsm12B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm12B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, 
-4(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm12B: + MOVQ BX, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm12B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm12B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm12B: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm12B: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm12B + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm12B: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm12B: + CMPL CX, 8(SP) + JAE 
emit_remainder_encodeBetterBlockAsm12B + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm12B: + MOVQ $0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x32, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x34, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x32, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x34, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 65560(SP)(R10*4) + MOVL R13, 65560(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeBetterBlockAsm12B: + CMPQ DI, R8 + JAE search_loop_encodeBetterBlockAsm12B + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x32, R9 + SHLQ $0x10, R10 + IMULQ BX, R10 + SHRQ $0x32, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeBetterBlockAsm12B + +emit_remainder_encodeBetterBlockAsm12B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm12B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm12B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm12B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBetterBlockAsm12B + JB three_bytes_emit_remainder_encodeBetterBlockAsm12B + +three_bytes_emit_remainder_encodeBetterBlockAsm12B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B + +two_bytes_emit_remainder_encodeBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm12B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm12B + +one_byte_emit_remainder_encodeBetterBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, 
(AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm12B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm12B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm12B + +memmove_long_emit_remainder_encodeBetterBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm12B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm10B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm10B(SB), $20504-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x000000a0, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm10B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm10B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm10B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x36, R10 + MOVL 24(SP)(R9*4), BX + MOVL 16408(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 16408(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ 
candidate_match_encodeBetterBlockAsm10B + CMPQ R10, SI + JNE no_short_found_encodeBetterBlockAsm10B + MOVL DI, BX + JMP candidate_match_encodeBetterBlockAsm10B + +no_short_found_encodeBetterBlockAsm10B: + CMPL R9, SI + JEQ candidate_match_encodeBetterBlockAsm10B + CMPL R10, SI + JEQ candidateS_match_encodeBetterBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm10B + +candidateS_match_encodeBetterBlockAsm10B: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBetterBlockAsm10B + DECL CX + MOVL DI, BX + +candidate_match_encodeBetterBlockAsm10B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBetterBlockAsm10B + +match_extend_back_loop_encodeBetterBlockAsm10B: + CMPL CX, SI + JBE match_extend_back_end_encodeBetterBlockAsm10B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeBetterBlockAsm10B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBetterBlockAsm10B + JMP match_extend_back_loop_encodeBetterBlockAsm10B + +match_extend_back_end_encodeBetterBlockAsm10B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm10B: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBetterBlockAsm10B + +matchlen_loopback_match_nolit_encodeBetterBlockAsm10B: + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_match_nolit_encodeBetterBlockAsm10B + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeBetterBlockAsm10B + +matchlen_loop_match_nolit_encodeBetterBlockAsm10B: + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 + JAE matchlen_loopback_match_nolit_encodeBetterBlockAsm10B + +matchlen_match4_match_nolit_encodeBetterBlockAsm10B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm10B + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm10B + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeBetterBlockAsm10B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm10B + JB match_nolit_end_encodeBetterBlockAsm10B + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm10B + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeBetterBlockAsm10B + +matchlen_match1_match_nolit_encodeBetterBlockAsm10B: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeBetterBlockAsm10B + LEAL 1(R11), R11 + +match_nolit_end_encodeBetterBlockAsm10B: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + CMPL 16(SP), DI + JEQ match_is_repeat_encodeBetterBlockAsm10B + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm10B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm10B + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm10B + JB three_bytes_match_emit_encodeBetterBlockAsm10B + +three_bytes_match_emit_encodeBetterBlockAsm10B: 
+ MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm10B + +two_bytes_match_emit_encodeBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm10B + JMP memmove_long_match_emit_encodeBetterBlockAsm10B + +one_byte_match_emit_encodeBetterBlockAsm10B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm10B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm10B: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm10B + +memmove_long_match_emit_encodeBetterBlockAsm10B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm10B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy + CMPL R11, $0x40 + JBE 
two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B + CMPL DI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm10B + MOVL $0x00000001, BX + LEAL 16(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + SUBL $0x08, R11 + + // emitRepeat + LEAL -4(R11), R11 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short_2b: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +long_offset_short_match_nolit_encodeBetterBlockAsm10B: + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_three_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_offset_match_nolit_encodeBetterBlockAsm10B_emit_copy_short: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm10B: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B + CMPL DI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm10B + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP 
match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +emit_copy_three_match_nolit_encodeBetterBlockAsm10B: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +match_is_repeat_encodeBetterBlockAsm10B: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm10B + CMPL BX, $0x00000100 + JB two_bytes_match_emit_repeat_encodeBetterBlockAsm10B + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm10B + +three_bytes_match_emit_repeat_encodeBetterBlockAsm10B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm10B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm10B + +one_byte_match_emit_repeat_encodeBetterBlockAsm10B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm10B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm10B: + MOVQ BX, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm10B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + 
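+
+ // genMemMoveLong continues below: X0-X3 (loaded above) hold the first and
+ // last 32 source bytes. The two loops copy 32-byte blocks, and the saved
+ // head and tail are stored last to patch the unaligned edges, roughly:
+ //
+ //	head, tail := src[:32], src[n-32:] // X0/X1 and X2/X3
+ //	// ... 32-byte block copies into dst ...
+ //	copy(dst[:32], head)   // stored last, fixing the unaligned head
+ //	copy(dst[n-32:], tail) // and the unaligned tail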
+emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm10B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B + CMPL DI, $0x00000800 + JB repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm10B: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm10B: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm10B + +repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm10B: + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm10B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm10B + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm10B: + MOVQ $0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x34, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x36, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x34, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x36, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 16408(SP)(R10*4) + MOVL R13, 16408(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeBetterBlockAsm10B: + CMPQ DI, R8 + JAE search_loop_encodeBetterBlockAsm10B + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x34, R9 + SHLQ $0x10, R10 + IMULQ BX, R10 + SHRQ $0x34, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeBetterBlockAsm10B + +emit_remainder_encodeBetterBlockAsm10B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm10B: + MOVQ src_len+32(FP), CX + MOVL 
12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm10B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm10B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBetterBlockAsm10B + JB three_bytes_emit_remainder_encodeBetterBlockAsm10B + +three_bytes_emit_remainder_encodeBetterBlockAsm10B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B + +two_bytes_emit_remainder_encodeBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm10B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm10B + +one_byte_emit_remainder_encodeBetterBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm10B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm10B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm10B + +memmove_long_emit_remainder_encodeBetterBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + 
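+
+ // Alignment setup above: R8 = 64 - (dst & 31), so the stores the loops
+ // below issue at -32(AX)(R8*1) land on 32-byte boundaries; both loops load
+ // with unaligned MOVOU from the source and store with aligned MOVOA, and
+ // the X0-X3 head/tail stores afterwards absorb the unaligned edges.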
+emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm10B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeBetterBlockAsm8B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeBetterBlockAsm8B(SB), $5144-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000028, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeBetterBlockAsm8B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeBetterBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -6(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeBetterBlockAsm8B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x04, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm8B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x38, R10 + MOVL 24(SP)(R9*4), BX + MOVL 4120(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 4120(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeBetterBlockAsm8B + CMPQ R10, SI + JNE no_short_found_encodeBetterBlockAsm8B + MOVL DI, BX + JMP candidate_match_encodeBetterBlockAsm8B + +no_short_found_encodeBetterBlockAsm8B: + CMPL R9, SI + JEQ candidate_match_encodeBetterBlockAsm8B + CMPL R10, SI + JEQ candidateS_match_encodeBetterBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeBetterBlockAsm8B + +candidateS_match_encodeBetterBlockAsm8B: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeBetterBlockAsm8B + DECL CX + MOVL DI, BX + +candidate_match_encodeBetterBlockAsm8B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeBetterBlockAsm8B + +match_extend_back_loop_encodeBetterBlockAsm8B: + CMPL CX, SI + JBE match_extend_back_end_encodeBetterBlockAsm8B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeBetterBlockAsm8B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeBetterBlockAsm8B + JMP match_extend_back_loop_encodeBetterBlockAsm8B + +match_extend_back_end_encodeBetterBlockAsm8B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeBetterBlockAsm8B: + MOVL CX, SI + ADDL $0x04, CX + 
ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeBetterBlockAsm8B + +matchlen_loopback_match_nolit_encodeBetterBlockAsm8B: + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_match_nolit_encodeBetterBlockAsm8B + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeBetterBlockAsm8B + +matchlen_loop_match_nolit_encodeBetterBlockAsm8B: + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 + JAE matchlen_loopback_match_nolit_encodeBetterBlockAsm8B + +matchlen_match4_match_nolit_encodeBetterBlockAsm8B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeBetterBlockAsm8B + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeBetterBlockAsm8B + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeBetterBlockAsm8B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeBetterBlockAsm8B + JB match_nolit_end_encodeBetterBlockAsm8B + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeBetterBlockAsm8B + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeBetterBlockAsm8B + +matchlen_match1_match_nolit_encodeBetterBlockAsm8B: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeBetterBlockAsm8B + LEAL 1(R11), R11 + +match_nolit_end_encodeBetterBlockAsm8B: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + CMPL 16(SP), DI + JEQ match_is_repeat_encodeBetterBlockAsm8B + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeBetterBlockAsm8B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeBetterBlockAsm8B + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeBetterBlockAsm8B + JB three_bytes_match_emit_encodeBetterBlockAsm8B + +three_bytes_match_emit_encodeBetterBlockAsm8B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeBetterBlockAsm8B + +two_bytes_match_emit_encodeBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeBetterBlockAsm8B + JMP memmove_long_match_emit_encodeBetterBlockAsm8B + +one_byte_match_emit_encodeBetterBlockAsm8B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeBetterBlockAsm8B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x04 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4 + CMPQ R8, $0x08 + JB emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4: + MOVL (R9), R10 + MOVL R10, (AX) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (R9), R10 + MOVL -4(R9)(R8*1), R9 + MOVL R10, (AX) + MOVL R9, -4(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ 
-8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeBetterBlockAsm8B: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeBetterBlockAsm8B + +memmove_long_match_emit_encodeBetterBlockAsm8B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeBetterBlockAsm8B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B + CMPL DI, $0x00000800 + JAE long_offset_short_match_nolit_encodeBetterBlockAsm8B + MOVL $0x00000001, BX + LEAL 16(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + SUBL $0x08, R11 + + // emitRepeat + LEAL -4(R11), R11 + JMP cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short_2b: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP 
match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +long_offset_short_match_nolit_encodeBetterBlockAsm8B: + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + +cant_repeat_two_offset_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_three_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_two_match_nolit_encodeBetterBlockAsm8B_emit_copy_short: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +two_byte_offset_short_match_nolit_encodeBetterBlockAsm8B: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeBetterBlockAsm8B + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +emit_copy_three_match_nolit_encodeBetterBlockAsm8B: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +match_is_repeat_encodeBetterBlockAsm8B: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JB one_byte_match_emit_repeat_encodeBetterBlockAsm8B + CMPL BX, $0x00000100 + JB two_bytes_match_emit_repeat_encodeBetterBlockAsm8B + JB three_bytes_match_emit_repeat_encodeBetterBlockAsm8B + +three_bytes_match_emit_repeat_encodeBetterBlockAsm8B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B + +two_bytes_match_emit_repeat_encodeBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_repeat_encodeBetterBlockAsm8B + JMP memmove_long_match_emit_repeat_encodeBetterBlockAsm8B + +one_byte_match_emit_repeat_encodeBetterBlockAsm8B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_repeat_encodeBetterBlockAsm8B: + LEAQ (AX)(DI*1), BX + + // genMemMoveShort + CMPQ DI, $0x04 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4 + CMPQ DI, $0x08 + JB emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ DI, $0x10 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ DI, $0x20 + JBE emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4: + MOVL (R8), R9 + MOVL R9, (AX) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + 
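+	// genMemMoveShort cases: each length class is copied with two overlapping loads/stores (head and tail), so any length inside the class is handled without a byte loop.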
+emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (R8), R9 + MOVL -4(R8)(DI*1), R8 + MOVL R9, (AX) + MOVL R8, -4(AX)(DI*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DI*1) + JMP memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B + +emit_lit_memmove_match_emit_repeat_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + +memmove_end_copy_match_emit_repeat_encodeBetterBlockAsm8B: + MOVQ BX, AX + JMP emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B + +memmove_long_match_emit_repeat_encodeBetterBlockAsm8B: + LEAQ (AX)(DI*1), BX + + // genMemMoveLong + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R12 + SUBQ R9, R12 + DECQ R10 + JA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R8)(R12*1), R9 + LEAQ -32(AX)(R12*1), R13 + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R13) + MOVOA X5, 16(R13) + ADDQ $0x20, R13 + ADDQ $0x20, R9 + ADDQ $0x20, R12 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R8)(R12*1), X4 + MOVOU -16(R8)(R12*1), X5 + MOVOA X4, -32(AX)(R12*1) + MOVOA X5, -16(AX)(R12*1) + ADDQ $0x20, R12 + CMPQ DI, R12 + JAE emit_lit_memmove_long_match_emit_repeat_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX + +emit_literal_done_match_emit_repeat_encodeBetterBlockAsm8B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitRepeat + MOVL R11, BX + LEAL -4(R11), R11 + CMPL BX, $0x08 + JBE repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B + CMPL BX, $0x0c + JAE cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B + +cant_repeat_two_offset_match_nolit_repeat_encodeBetterBlockAsm8B: + CMPL R11, $0x00000104 + JB repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B + LEAL -256(R11), R11 + MOVW $0x0019, (AX) + MOVW R11, 2(AX) + ADDQ $0x04, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_three_match_nolit_repeat_encodeBetterBlockAsm8B: + LEAL -4(R11), R11 + MOVW $0x0015, (AX) + MOVB R11, 2(AX) + ADDQ $0x03, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + +repeat_two_match_nolit_repeat_encodeBetterBlockAsm8B: + SHLL $0x02, R11 + ORL $0x01, R11 + MOVW R11, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeBetterBlockAsm8B + XORQ BX, BX + LEAL 1(BX)(R11*4), R11 + MOVB DI, 1(AX) + SARL $0x08, DI + SHLL $0x05, DI + ORL DI, R11 + MOVB R11, (AX) + ADDQ $0x02, AX + +match_nolit_emitcopy_end_encodeBetterBlockAsm8B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeBetterBlockAsm8B + CMPQ AX, (SP) + JB 
match_nolit_dst_ok_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeBetterBlockAsm8B: + MOVQ $0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x36, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x38, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x36, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x38, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 4120(SP)(R10*4) + MOVL R13, 4120(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeBetterBlockAsm8B: + CMPQ DI, R8 + JAE search_loop_encodeBetterBlockAsm8B + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x36, R9 + SHLQ $0x10, R10 + IMULQ BX, R10 + SHRQ $0x36, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeBetterBlockAsm8B + +emit_remainder_encodeBetterBlockAsm8B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeBetterBlockAsm8B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeBetterBlockAsm8B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeBetterBlockAsm8B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeBetterBlockAsm8B + JB three_bytes_emit_remainder_encodeBetterBlockAsm8B + +three_bytes_emit_remainder_encodeBetterBlockAsm8B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B + +two_bytes_emit_remainder_encodeBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeBetterBlockAsm8B + JMP memmove_long_emit_remainder_encodeBetterBlockAsm8B + +one_byte_emit_remainder_encodeBetterBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeBetterBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + 
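+	// The 8-16, 17-32 and 33-64 byte classes apply the same overlapping trick with MOVQ/MOVOU pairs; runs of 64 bytes or more were already routed to genMemMoveLong, whose MOVOA loop stores to a 32-byte-aligned destination with MOVOU fixups at both ends.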
+emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeBetterBlockAsm8B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeBetterBlockAsm8B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeBetterBlockAsm8B + +memmove_long_emit_remainder_encodeBetterBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeBetterBlockAsm8B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm(SB), $65560-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x06, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + SHLQ $0x10, R10 + IMULQ R8, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE 
no_repeat_found_encodeSnappyBlockAsm + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI + JZ repeat_extend_back_end_encodeSnappyBlockAsm + +repeat_extend_back_loop_encodeSnappyBlockAsm: + CMPL SI, BX + JBE repeat_extend_back_end_encodeSnappyBlockAsm + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm + LEAL -1(SI), SI + DECL DI + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm + +repeat_extend_back_end_encodeSnappyBlockAsm: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeSnappyBlockAsm + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeSnappyBlockAsm + CMPL BX, $0x00010000 + JB three_bytes_repeat_emit_encodeSnappyBlockAsm + CMPL BX, $0x01000000 + JB four_bytes_repeat_emit_encodeSnappyBlockAsm + MOVB $0xfc, (AX) + MOVL BX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +four_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVL BX, R9 + SHRL $0x10, R9 + MOVB $0xf8, (AX) + MOVW BX, 1(AX) + MOVB R9, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +three_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +two_bytes_repeat_emit_encodeSnappyBlockAsm: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeSnappyBlockAsm + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm + +one_byte_repeat_emit_encodeSnappyBlockAsm: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm: + LEAQ (AX)(DI*1), BX + + // genMemMoveShort + CMPQ DI, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8 + CMPQ DI, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ DI, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8: + MOVQ (R8), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm + +memmove_long_repeat_emit_encodeSnappyBlockAsm: + LEAQ (AX)(DI*1), BX + + // genMemMoveLong + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ 
-32(R8)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R8)(R11*1), X4 + MOVOU -16(R8)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ DI, R11 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm + +matchlen_loopback_repeat_extend_encodeSnappyBlockAsm: + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm + +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm + +matchlen_loop_repeat_extend_encodeSnappyBlockAsm: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JAE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm: + CMPL DI, $0x04 + JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm: + CMPL DI, $0x01 + JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm + JB repeat_extend_forward_end_encodeSnappyBlockAsm + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ repeat_extend_forward_end_encodeSnappyBlockAsm + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm: + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm + LEAL 1(R10), R10 + +repeat_extend_forward_end_encodeSnappyBlockAsm: + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + + // emitCopy + CMPL SI, $0x00010000 + JB two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm + +four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm: + CMPL BX, $0x40 + JBE four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm + MOVB $0xff, (AX) + MOVL SI, 1(AX) + LEAL -64(BX), BX + ADDQ $0x05, AX + CMPL BX, $0x04 + JB four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm + JMP four_bytes_loop_back_repeat_as_copy_encodeSnappyBlockAsm + +four_bytes_remain_repeat_as_copy_encodeSnappyBlockAsm: + TESTL BX, BX + JZ repeat_end_emit_encodeSnappyBlockAsm + XORL DI, DI + LEAL -1(DI)(BX*4), BX + MOVB BL, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, AX + JMP repeat_end_emit_encodeSnappyBlockAsm + +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm: + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE 
emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm + CMPL SI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm + +no_repeat_found_encodeSnappyBlockAsm: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBlockAsm + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeSnappyBlockAsm + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeSnappyBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm + +candidate3_match_encodeSnappyBlockAsm: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm + +candidate2_match_encodeSnappyBlockAsm: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_encodeSnappyBlockAsm: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBlockAsm + +match_extend_back_loop_encodeSnappyBlockAsm: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBlockAsm + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBlockAsm + JMP match_extend_back_loop_encodeSnappyBlockAsm + +match_extend_back_end_encodeSnappyBlockAsm: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 5(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeSnappyBlockAsm + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBlockAsm + CMPL DI, $0x00010000 + JB three_bytes_match_emit_encodeSnappyBlockAsm + CMPL DI, $0x01000000 + JB four_bytes_match_emit_encodeSnappyBlockAsm + MOVB $0xfc, (AX) + MOVL DI, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +four_bytes_match_emit_encodeSnappyBlockAsm: + MOVL DI, R9 + SHRL $0x10, R9 + MOVB $0xf8, (AX) + MOVW DI, 1(AX) + MOVB R9, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +three_bytes_match_emit_encodeSnappyBlockAsm: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +two_bytes_match_emit_encodeSnappyBlockAsm: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeSnappyBlockAsm + JMP memmove_long_match_emit_encodeSnappyBlockAsm + +one_byte_match_emit_encodeSnappyBlockAsm: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64 + 
+emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm + +memmove_long_match_emit_encodeSnappyBlockAsm: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm: +match_nolit_loop_encodeSnappyBlockAsm: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm + +matchlen_loopback_match_nolit_encodeSnappyBlockAsm: + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 + JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm + +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_encodeSnappyBlockAsm + +matchlen_loop_match_nolit_encodeSnappyBlockAsm: + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 + JAE matchlen_loopback_match_nolit_encodeSnappyBlockAsm + +matchlen_match4_match_nolit_encodeSnappyBlockAsm: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm + JB match_nolit_end_encodeSnappyBlockAsm + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeSnappyBlockAsm + 
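+	// matchLen tail: after the 8/4/2-byte probes a final single-byte compare settles the length; emitCopy below then picks the 2-byte tag form (offset < 2048, length 4-11), the 3-byte form (2-byte offset), or the 5-byte form (4-byte offset), splitting long matches into 60- or 64-byte chunks.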
+matchlen_match1_match_nolit_encodeSnappyBlockAsm: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeSnappyBlockAsm + LEAL 1(R9), R9 + +match_nolit_end_encodeSnappyBlockAsm: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy + CMPL BX, $0x00010000 + JB two_byte_offset_match_nolit_encodeSnappyBlockAsm + +four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm: + CMPL R9, $0x40 + JBE four_bytes_remain_match_nolit_encodeSnappyBlockAsm + MOVB $0xff, (AX) + MOVL BX, 1(AX) + LEAL -64(R9), R9 + ADDQ $0x05, AX + CMPL R9, $0x04 + JB four_bytes_remain_match_nolit_encodeSnappyBlockAsm + JMP four_bytes_loop_back_match_nolit_encodeSnappyBlockAsm + +four_bytes_remain_match_nolit_encodeSnappyBlockAsm: + TESTL R9, R9 + JZ match_nolit_emitcopy_end_encodeSnappyBlockAsm + XORL SI, SI + LEAL -1(SI)(R9*4), R9 + MOVB R9, (AX) + MOVL BX, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm + +two_byte_offset_match_nolit_encodeSnappyBlockAsm: + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm + MOVB $0xee, (AX) + MOVW BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm + CMPL BX, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm + +emit_copy_three_match_nolit_encodeSnappyBlockAsm: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm: + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x10, DI + IMULQ R8, DI + SHRQ $0x32, DI + SHLQ $0x10, BX + IMULQ R8, BX + SHRQ $0x32, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeSnappyBlockAsm + INCL CX + JMP search_loop_encodeSnappyBlockAsm + +emit_remainder_encodeSnappyBlockAsm: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 5(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeSnappyBlockAsm + CMPL DX, $0x01000000 + JB four_bytes_emit_remainder_encodeSnappyBlockAsm + MOVB $0xfc, (AX) + MOVL DX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +four_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +three_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + 
ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +two_bytes_emit_remainder_encodeSnappyBlockAsm: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm + +one_byte_emit_remainder_encodeSnappyBlockAsm: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm + +memmove_long_emit_remainder_encodeSnappyBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE 
emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm64K(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm64K(SB), $65560-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm64K: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm64K + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm64K: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x06, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm64K + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + SHLQ $0x10, R10 + IMULQ R8, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeSnappyBlockAsm64K + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI + JZ repeat_extend_back_end_encodeSnappyBlockAsm64K + +repeat_extend_back_loop_encodeSnappyBlockAsm64K: + CMPL SI, BX + JBE repeat_extend_back_end_encodeSnappyBlockAsm64K + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm64K + LEAL -1(SI), SI + DECL DI + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm64K + +repeat_extend_back_end_encodeSnappyBlockAsm64K: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeSnappyBlockAsm64K + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeSnappyBlockAsm64K + JB three_bytes_repeat_emit_encodeSnappyBlockAsm64K + +three_bytes_repeat_emit_encodeSnappyBlockAsm64K: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K + +two_bytes_repeat_emit_encodeSnappyBlockAsm64K: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeSnappyBlockAsm64K + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm64K + +one_byte_repeat_emit_encodeSnappyBlockAsm64K: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm64K: + LEAQ (AX)(DI*1), BX + + // genMemMoveShort + CMPQ DI, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8 + CMPQ DI, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ DI, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP 
emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8: + MOVQ (R8), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_8through16: + MOVQ (R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm64K_memmove_move_33through64: + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm64K: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K + +memmove_long_repeat_emit_encodeSnappyBlockAsm64K: + LEAQ (AX)(DI*1), BX + + // genMemMoveLong + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(R8)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(R8)(R11*1), X4 + MOVOU -16(R8)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ DI, R11 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K + +matchlen_loopback_repeat_extend_encodeSnappyBlockAsm64K: + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm64K + +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K + +matchlen_loop_repeat_extend_encodeSnappyBlockAsm64K: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JAE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm64K + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K: + CMPL DI, $0x04 + JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K: + CMPL DI, $0x01 + JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K + JB repeat_extend_forward_end_encodeSnappyBlockAsm64K + MOVW (R8)(R10*1), R9 + 
CMPW (BX)(R10*1), R9 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K: + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm64K + LEAL 1(R10), R10 + +repeat_extend_forward_end_encodeSnappyBlockAsm64K: + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K: + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm64K + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm64K: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K + CMPL SI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm64K + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm64K: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm64K: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm64K + +no_repeat_found_encodeSnappyBlockAsm64K: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBlockAsm64K + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeSnappyBlockAsm64K + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeSnappyBlockAsm64K + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm64K + +candidate3_match_encodeSnappyBlockAsm64K: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm64K + +candidate2_match_encodeSnappyBlockAsm64K: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_encodeSnappyBlockAsm64K: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBlockAsm64K + +match_extend_back_loop_encodeSnappyBlockAsm64K: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBlockAsm64K + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm64K + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBlockAsm64K + JMP match_extend_back_loop_encodeSnappyBlockAsm64K + +match_extend_back_end_encodeSnappyBlockAsm64K: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm64K: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm64K + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeSnappyBlockAsm64K + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBlockAsm64K + JB three_bytes_match_emit_encodeSnappyBlockAsm64K + +three_bytes_match_emit_encodeSnappyBlockAsm64K: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm64K + +two_bytes_match_emit_encodeSnappyBlockAsm64K: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeSnappyBlockAsm64K + JMP memmove_long_match_emit_encodeSnappyBlockAsm64K + 
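+	// Literal-length tags: runs of 1-60 bytes store (len-1)<<2 directly in the tag byte; tags 0xf0 and 0xf4 prefix one or two explicit length bytes, which is as wide as the 64K variant ever needs.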
+one_byte_match_emit_encodeSnappyBlockAsm64K: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm64K: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm64K_memmove_move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm64K: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm64K + +memmove_long_match_emit_encodeSnappyBlockAsm64K: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm64K: +match_nolit_loop_encodeSnappyBlockAsm64K: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm64K + +matchlen_loopback_match_nolit_encodeSnappyBlockAsm64K: + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 + JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm64K + +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_encodeSnappyBlockAsm64K + +matchlen_loop_match_nolit_encodeSnappyBlockAsm64K: + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 + JAE 
matchlen_loopback_match_nolit_encodeSnappyBlockAsm64K + +matchlen_match4_match_nolit_encodeSnappyBlockAsm64K: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm64K + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm64K + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm64K: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K + JB match_nolit_end_encodeSnappyBlockAsm64K + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeSnappyBlockAsm64K + +matchlen_match1_match_nolit_encodeSnappyBlockAsm64K: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeSnappyBlockAsm64K + LEAL 1(R9), R9 + +match_nolit_end_encodeSnappyBlockAsm64K: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm64K: + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K + MOVB $0xee, (AX) + MOVW BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm64K + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm64K: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K + CMPL BX, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm64K + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm64K + +emit_copy_three_match_nolit_encodeSnappyBlockAsm64K: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm64K: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm64K + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm64K: + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x10, DI + IMULQ R8, DI + SHRQ $0x32, DI + SHLQ $0x10, BX + IMULQ R8, BX + SHRQ $0x32, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeSnappyBlockAsm64K + INCL CX + JMP search_loop_encodeSnappyBlockAsm64K + +emit_remainder_encodeSnappyBlockAsm64K: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm64K: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm64K + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm64K + JB three_bytes_emit_remainder_encodeSnappyBlockAsm64K + +three_bytes_emit_remainder_encodeSnappyBlockAsm64K: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K + +two_bytes_emit_remainder_encodeSnappyBlockAsm64K: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm64K + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm64K + 
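+	// emit_remainder: whatever trails the final match is flushed as one literal run; the bounds check above already returned 0 if dst could not hold it.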
+one_byte_emit_remainder_encodeSnappyBlockAsm64K: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm64K: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm64K_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm64K: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K + +memmove_long_emit_remainder_encodeSnappyBlockAsm64K: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K: + MOVQ dst_base+0(FP), CX + 
SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm12B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm12B(SB), $16408-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000080, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm12B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm12B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm12B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x000000cf1bbcdcbb, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x18, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + SHLQ $0x18, R10 + IMULQ R8, R10 + SHRQ $0x34, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x18, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeSnappyBlockAsm12B + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI + JZ repeat_extend_back_end_encodeSnappyBlockAsm12B + +repeat_extend_back_loop_encodeSnappyBlockAsm12B: + CMPL SI, BX + JBE repeat_extend_back_end_encodeSnappyBlockAsm12B + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm12B + LEAL -1(SI), SI + DECL DI + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm12B + +repeat_extend_back_end_encodeSnappyBlockAsm12B: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeSnappyBlockAsm12B + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeSnappyBlockAsm12B + JB three_bytes_repeat_emit_encodeSnappyBlockAsm12B + +three_bytes_repeat_emit_encodeSnappyBlockAsm12B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B + +two_bytes_repeat_emit_encodeSnappyBlockAsm12B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeSnappyBlockAsm12B + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm12B + +one_byte_repeat_emit_encodeSnappyBlockAsm12B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm12B: + LEAQ (AX)(DI*1), BX + + // genMemMoveShort + CMPQ DI, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8 + CMPQ DI, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16 + CMPQ DI, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8: + MOVQ (R8), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_8through16: + MOVQ 
(R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_17through32: + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm12B_memmove_move_33through64: + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm12B: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B + +memmove_long_repeat_emit_encodeSnappyBlockAsm12B: + LEAQ (AX)(DI*1), BX + + // genMemMoveLong + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R8)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R8)(R11*1), X4 + MOVOU -16(R8)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ DI, R11 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B + +matchlen_loopback_repeat_extend_encodeSnappyBlockAsm12B: + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm12B + +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B + +matchlen_loop_repeat_extend_encodeSnappyBlockAsm12B: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JAE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm12B + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B: + CMPL DI, $0x04 + JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B: + CMPL DI, $0x01 + JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B + JB repeat_extend_forward_end_encodeSnappyBlockAsm12B + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B: + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm12B 
+ LEAL 1(R10), R10 + +repeat_extend_forward_end_encodeSnappyBlockAsm12B: + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B: + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm12B + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm12B: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B + CMPL SI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm12B + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm12B: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm12B: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm12B + +no_repeat_found_encodeSnappyBlockAsm12B: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBlockAsm12B + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeSnappyBlockAsm12B + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeSnappyBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm12B + +candidate3_match_encodeSnappyBlockAsm12B: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm12B + +candidate2_match_encodeSnappyBlockAsm12B: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_encodeSnappyBlockAsm12B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBlockAsm12B + +match_extend_back_loop_encodeSnappyBlockAsm12B: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBlockAsm12B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm12B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBlockAsm12B + JMP match_extend_back_loop_encodeSnappyBlockAsm12B + +match_extend_back_end_encodeSnappyBlockAsm12B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm12B: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm12B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeSnappyBlockAsm12B + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBlockAsm12B + JB three_bytes_match_emit_encodeSnappyBlockAsm12B + +three_bytes_match_emit_encodeSnappyBlockAsm12B: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm12B + +two_bytes_match_emit_encodeSnappyBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeSnappyBlockAsm12B + JMP memmove_long_match_emit_encodeSnappyBlockAsm12B + +one_byte_match_emit_encodeSnappyBlockAsm12B: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm12B: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8 + CMPQ R8, $0x10 + JBE 
emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm12B_memmove_move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm12B: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm12B + +memmove_long_match_emit_encodeSnappyBlockAsm12B: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm12B: +match_nolit_loop_encodeSnappyBlockAsm12B: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm12B + +matchlen_loopback_match_nolit_encodeSnappyBlockAsm12B: + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 + JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm12B + +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_encodeSnappyBlockAsm12B + +matchlen_loop_match_nolit_encodeSnappyBlockAsm12B: + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 + JAE matchlen_loopback_match_nolit_encodeSnappyBlockAsm12B + +matchlen_match4_match_nolit_encodeSnappyBlockAsm12B: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm12B + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm12B + LEAL -4(SI), SI + LEAL 4(R9), R9 + 
+matchlen_match2_match_nolit_encodeSnappyBlockAsm12B: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B + JB match_nolit_end_encodeSnappyBlockAsm12B + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeSnappyBlockAsm12B + +matchlen_match1_match_nolit_encodeSnappyBlockAsm12B: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeSnappyBlockAsm12B + LEAL 1(R9), R9 + +match_nolit_end_encodeSnappyBlockAsm12B: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm12B: + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B + MOVB $0xee, (AX) + MOVW BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm12B + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm12B: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B + CMPL BX, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm12B + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm12B + +emit_copy_three_match_nolit_encodeSnappyBlockAsm12B: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm12B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm12B + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm12B: + MOVQ $0x000000cf1bbcdcbb, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x18, DI + IMULQ R8, DI + SHRQ $0x34, DI + SHLQ $0x18, BX + IMULQ R8, BX + SHRQ $0x34, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeSnappyBlockAsm12B + INCL CX + JMP search_loop_encodeSnappyBlockAsm12B + +emit_remainder_encodeSnappyBlockAsm12B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm12B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm12B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm12B + JB three_bytes_emit_remainder_encodeSnappyBlockAsm12B + +three_bytes_emit_remainder_encodeSnappyBlockAsm12B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B + +two_bytes_emit_remainder_encodeSnappyBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm12B + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm12B + +one_byte_emit_remainder_encodeSnappyBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2 + JE 
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm12B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm12B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B + +memmove_long_emit_remainder_encodeSnappyBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm10B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm10B(SB), $4120-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000020, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm10B: + MOVOU X0, (DX) + 
MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm10B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm10B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x9e3779b1, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + SHLQ $0x20, R10 + IMULQ R8, R10 + SHRQ $0x36, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeSnappyBlockAsm10B + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI + JZ repeat_extend_back_end_encodeSnappyBlockAsm10B + +repeat_extend_back_loop_encodeSnappyBlockAsm10B: + CMPL SI, BX + JBE repeat_extend_back_end_encodeSnappyBlockAsm10B + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm10B + LEAL -1(SI), SI + DECL DI + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm10B + +repeat_extend_back_end_encodeSnappyBlockAsm10B: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeSnappyBlockAsm10B + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeSnappyBlockAsm10B + JB three_bytes_repeat_emit_encodeSnappyBlockAsm10B + +three_bytes_repeat_emit_encodeSnappyBlockAsm10B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B + +two_bytes_repeat_emit_encodeSnappyBlockAsm10B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeSnappyBlockAsm10B + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm10B + +one_byte_repeat_emit_encodeSnappyBlockAsm10B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm10B: + LEAQ (AX)(DI*1), BX + + // genMemMoveShort + CMPQ DI, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8 + CMPQ DI, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16 + CMPQ DI, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8: + MOVQ (R8), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_8through16: + MOVQ (R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_17through32: + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DI*1) + JMP 
memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm10B_memmove_move_33through64: + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm10B: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B + +memmove_long_repeat_emit_encodeSnappyBlockAsm10B: + LEAQ (AX)(DI*1), BX + + // genMemMoveLong + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R8)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R8)(R11*1), X4 + MOVOU -16(R8)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ DI, R11 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B + +matchlen_loopback_repeat_extend_encodeSnappyBlockAsm10B: + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm10B + +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B + +matchlen_loop_repeat_extend_encodeSnappyBlockAsm10B: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JAE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm10B + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B: + CMPL DI, $0x04 + JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B: + CMPL DI, $0x01 + JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B + JB repeat_extend_forward_end_encodeSnappyBlockAsm10B + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B: + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm10B + LEAL 1(R10), R10 + +repeat_extend_forward_end_encodeSnappyBlockAsm10B: + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B: + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B + MOVB $0xee, 
(AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm10B + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm10B: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B + CMPL SI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm10B + +emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm10B: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm10B: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm10B + +no_repeat_found_encodeSnappyBlockAsm10B: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBlockAsm10B + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeSnappyBlockAsm10B + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeSnappyBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm10B + +candidate3_match_encodeSnappyBlockAsm10B: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm10B + +candidate2_match_encodeSnappyBlockAsm10B: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_encodeSnappyBlockAsm10B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBlockAsm10B + +match_extend_back_loop_encodeSnappyBlockAsm10B: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBlockAsm10B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm10B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBlockAsm10B + JMP match_extend_back_loop_encodeSnappyBlockAsm10B + +match_extend_back_end_encodeSnappyBlockAsm10B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm10B: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm10B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeSnappyBlockAsm10B + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBlockAsm10B + JB three_bytes_match_emit_encodeSnappyBlockAsm10B + +three_bytes_match_emit_encodeSnappyBlockAsm10B: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm10B + +two_bytes_match_emit_encodeSnappyBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeSnappyBlockAsm10B + JMP memmove_long_match_emit_encodeSnappyBlockAsm10B + +one_byte_match_emit_encodeSnappyBlockAsm10B: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm10B: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) 
+ JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm10B_memmove_move_33through64: + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm10B: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm10B + +memmove_long_match_emit_encodeSnappyBlockAsm10B: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm10B: +match_nolit_loop_encodeSnappyBlockAsm10B: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm10B + +matchlen_loopback_match_nolit_encodeSnappyBlockAsm10B: + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 + JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm10B + +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_encodeSnappyBlockAsm10B + +matchlen_loop_match_nolit_encodeSnappyBlockAsm10B: + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 + JAE matchlen_loopback_match_nolit_encodeSnappyBlockAsm10B + +matchlen_match4_match_nolit_encodeSnappyBlockAsm10B: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm10B + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm10B + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm10B: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B + JB match_nolit_end_encodeSnappyBlockAsm10B + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeSnappyBlockAsm10B + 
+matchlen_match1_match_nolit_encodeSnappyBlockAsm10B: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeSnappyBlockAsm10B + LEAL 1(R9), R9 + +match_nolit_end_encodeSnappyBlockAsm10B: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm10B: + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B + MOVB $0xee, (AX) + MOVW BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm10B + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm10B: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B + CMPL BX, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm10B + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm10B + +emit_copy_three_match_nolit_encodeSnappyBlockAsm10B: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm10B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm10B + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm10B: + MOVQ $0x9e3779b1, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x20, DI + IMULQ R8, DI + SHRQ $0x36, DI + SHLQ $0x20, BX + IMULQ R8, BX + SHRQ $0x36, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeSnappyBlockAsm10B + INCL CX + JMP search_loop_encodeSnappyBlockAsm10B + +emit_remainder_encodeSnappyBlockAsm10B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm10B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm10B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm10B + JB three_bytes_emit_remainder_encodeSnappyBlockAsm10B + +three_bytes_emit_remainder_encodeSnappyBlockAsm10B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B + +two_bytes_emit_remainder_encodeSnappyBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm10B + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm10B + +one_byte_emit_remainder_encodeSnappyBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32 + JMP 
emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm10B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm10B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B + +memmove_long_emit_remainder_encodeSnappyBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBlockAsm8B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBlockAsm8B(SB), $1048-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000008, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBlockAsm8B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, 
CX + MOVL CX, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBlockAsm8B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x04, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm8B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x9e3779b1, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x38, R9 + SHLQ $0x20, R10 + IMULQ R8, R10 + SHRQ $0x38, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x38, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_encodeSnappyBlockAsm8B + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI + JZ repeat_extend_back_end_encodeSnappyBlockAsm8B + +repeat_extend_back_loop_encodeSnappyBlockAsm8B: + CMPL SI, BX + JBE repeat_extend_back_end_encodeSnappyBlockAsm8B + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_encodeSnappyBlockAsm8B + LEAL -1(SI), SI + DECL DI + JNZ repeat_extend_back_loop_encodeSnappyBlockAsm8B + +repeat_extend_back_end_encodeSnappyBlockAsm8B: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_encodeSnappyBlockAsm8B + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_encodeSnappyBlockAsm8B + JB three_bytes_repeat_emit_encodeSnappyBlockAsm8B + +three_bytes_repeat_emit_encodeSnappyBlockAsm8B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B + +two_bytes_repeat_emit_encodeSnappyBlockAsm8B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_encodeSnappyBlockAsm8B + JMP memmove_long_repeat_emit_encodeSnappyBlockAsm8B + +one_byte_repeat_emit_encodeSnappyBlockAsm8B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_repeat_emit_encodeSnappyBlockAsm8B: + LEAQ (AX)(DI*1), BX + + // genMemMoveShort + CMPQ DI, $0x08 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8 + CMPQ DI, $0x10 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16 + CMPQ DI, $0x20 + JBE emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8: + MOVQ (R8), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_8through16: + MOVQ (R8), R9 + MOVQ -8(R8)(DI*1), R8 + MOVQ R9, (AX) + MOVQ R8, -8(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_17through32: + MOVOU (R8), X0 + MOVOU -16(R8)(DI*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DI*1) + JMP memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_repeat_emit_encodeSnappyBlockAsm8B_memmove_move_33through64: + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + +memmove_end_copy_repeat_emit_encodeSnappyBlockAsm8B: + MOVQ BX, AX + JMP emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B + 
+memmove_long_repeat_emit_encodeSnappyBlockAsm8B: + LEAQ (AX)(DI*1), BX + + // genMemMoveLong + MOVOU (R8), X0 + MOVOU 16(R8), X1 + MOVOU -32(R8)(DI*1), X2 + MOVOU -16(R8)(DI*1), X3 + MOVQ DI, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R8)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R8)(R11*1), X4 + MOVOU -16(R8)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ DI, R11 + JAE emit_lit_memmove_long_repeat_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DI*1) + MOVOU X3, -16(AX)(DI*1) + MOVQ BX, AX + +emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JB matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B + +matchlen_loopback_repeat_extend_encodeSnappyBlockAsm8B: + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm8B + +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B + +matchlen_loop_repeat_extend_encodeSnappyBlockAsm8B: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JAE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm8B + +matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B: + CMPL DI, $0x04 + JB matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 + JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B: + CMPL DI, $0x01 + JE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B + JB repeat_extend_forward_end_encodeSnappyBlockAsm8B + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 + JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B + +matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B: + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 + JNE repeat_extend_forward_end_encodeSnappyBlockAsm8B + LEAL 1(R10), R10 + +repeat_extend_forward_end_encodeSnappyBlockAsm8B: + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + + // emitCopy +two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B: + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B + MOVB $0xee, (AX) + MOVW SI, 1(AX) + LEAL -60(BX), BX + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_encodeSnappyBlockAsm8B + +two_byte_offset_short_repeat_as_copy_encodeSnappyBlockAsm8B: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B + LEAL -15(DI), DI + MOVB SI, 1(AX) + SHRL $0x08, SI + SHLL $0x05, SI + ORL SI, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP repeat_end_emit_encodeSnappyBlockAsm8B + 
+emit_copy_three_repeat_as_copy_encodeSnappyBlockAsm8B: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, AX + +repeat_end_emit_encodeSnappyBlockAsm8B: + MOVL CX, 12(SP) + JMP search_loop_encodeSnappyBlockAsm8B + +no_repeat_found_encodeSnappyBlockAsm8B: + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBlockAsm8B + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_encodeSnappyBlockAsm8B + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_encodeSnappyBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBlockAsm8B + +candidate3_match_encodeSnappyBlockAsm8B: + ADDL $0x02, CX + JMP candidate_match_encodeSnappyBlockAsm8B + +candidate2_match_encodeSnappyBlockAsm8B: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_encodeSnappyBlockAsm8B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBlockAsm8B + +match_extend_back_loop_encodeSnappyBlockAsm8B: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBlockAsm8B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBlockAsm8B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBlockAsm8B + JMP match_extend_back_loop_encodeSnappyBlockAsm8B + +match_extend_back_end_encodeSnappyBlockAsm8B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBlockAsm8B: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_encodeSnappyBlockAsm8B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), DI + CMPL DI, $0x3c + JB one_byte_match_emit_encodeSnappyBlockAsm8B + CMPL DI, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBlockAsm8B + JB three_bytes_match_emit_encodeSnappyBlockAsm8B + +three_bytes_match_emit_encodeSnappyBlockAsm8B: + MOVB $0xf4, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBlockAsm8B + +two_bytes_match_emit_encodeSnappyBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DI, 1(AX) + ADDQ $0x02, AX + CMPL DI, $0x40 + JB memmove_match_emit_encodeSnappyBlockAsm8B + JMP memmove_long_match_emit_encodeSnappyBlockAsm8B + +one_byte_match_emit_encodeSnappyBlockAsm8B: + SHLB $0x02, DI + MOVB DI, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBlockAsm8B: + LEAQ (AX)(R8*1), DI + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8: + MOVQ (SI), R9 + MOVQ R9, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_8through16: + MOVQ (SI), R9 + MOVQ -8(SI)(R8*1), SI + MOVQ R9, (AX) + MOVQ SI, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_17through32: + MOVOU (SI), X0 + MOVOU -16(SI)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBlockAsm8B_memmove_move_33through64: + 
MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBlockAsm8B: + MOVQ DI, AX + JMP emit_literal_done_match_emit_encodeSnappyBlockAsm8B + +memmove_long_match_emit_encodeSnappyBlockAsm8B: + LEAQ (AX)(R8*1), DI + + // genMemMoveLong + MOVOU (SI), X0 + MOVOU 16(SI), X1 + MOVOU -32(SI)(R8*1), X2 + MOVOU -16(SI)(R8*1), X3 + MOVQ R8, R10 + SHRQ $0x05, R10 + MOVQ AX, R9 + ANDL $0x0000001f, R9 + MOVQ $0x00000040, R11 + SUBQ R9, R11 + DECQ R10 + JA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(SI)(R11*1), R9 + LEAQ -32(AX)(R11*1), R12 + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back: + MOVOU (R9), X4 + MOVOU 16(R9), X5 + MOVOA X4, (R12) + MOVOA X5, 16(R12) + ADDQ $0x20, R12 + ADDQ $0x20, R9 + ADDQ $0x20, R11 + DECQ R10 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(SI)(R11*1), X4 + MOVOU -16(SI)(R11*1), X5 + MOVOA X4, -32(AX)(R11*1) + MOVOA X5, -16(AX)(R11*1) + ADDQ $0x20, R11 + CMPQ R8, R11 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ DI, AX + +emit_literal_done_match_emit_encodeSnappyBlockAsm8B: +match_nolit_loop_encodeSnappyBlockAsm8B: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBlockAsm8B + +matchlen_loopback_match_nolit_encodeSnappyBlockAsm8B: + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 + JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm8B + +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_encodeSnappyBlockAsm8B + +matchlen_loop_match_nolit_encodeSnappyBlockAsm8B: + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 + JAE matchlen_loopback_match_nolit_encodeSnappyBlockAsm8B + +matchlen_match4_match_nolit_encodeSnappyBlockAsm8B: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBlockAsm8B + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm8B + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_encodeSnappyBlockAsm8B: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B + JB match_nolit_end_encodeSnappyBlockAsm8B + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_encodeSnappyBlockAsm8B + +matchlen_match1_match_nolit_encodeSnappyBlockAsm8B: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_encodeSnappyBlockAsm8B + LEAL 1(R9), R9 + +match_nolit_end_encodeSnappyBlockAsm8B: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBlockAsm8B: + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B + MOVB $0xee, (AX) + MOVW BX, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBlockAsm8B + +two_byte_offset_short_match_nolit_encodeSnappyBlockAsm8B: + MOVL R9, SI + SHLL 
$0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBlockAsm8B + LEAL -15(SI), SI + MOVB BL, 1(AX) + SHRL $0x08, BX + SHLL $0x05, BX + ORL BX, SI + MOVB SI, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBlockAsm8B + +emit_copy_three_match_nolit_encodeSnappyBlockAsm8B: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBlockAsm8B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBlockAsm8B + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBlockAsm8B: + MOVQ $0x9e3779b1, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x20, DI + IMULQ R8, DI + SHRQ $0x38, DI + SHLQ $0x20, BX + IMULQ R8, BX + SHRQ $0x38, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_encodeSnappyBlockAsm8B + INCL CX + JMP search_loop_encodeSnappyBlockAsm8B + +emit_remainder_encodeSnappyBlockAsm8B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBlockAsm8B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBlockAsm8B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBlockAsm8B + JB three_bytes_emit_remainder_encodeSnappyBlockAsm8B + +three_bytes_emit_remainder_encodeSnappyBlockAsm8B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B + +two_bytes_emit_remainder_encodeSnappyBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBlockAsm8B + JMP memmove_long_emit_remainder_encodeSnappyBlockAsm8B + +one_byte_emit_remainder_encodeSnappyBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + 
+emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBlockAsm8B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBlockAsm8B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B + +memmove_long_emit_remainder_encodeSnappyBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm(SB), $589848-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00001200, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x07, BX + CMPL BX, $0x63 + JBE check_maxskip_ok_encodeSnappyBetterBlockAsm + LEAL 100(CX), BX + JMP check_maxskip_cont_encodeSnappyBetterBlockAsm + +check_maxskip_ok_encodeSnappyBetterBlockAsm: + LEAL 1(CX)(BX*1), BX + +check_maxskip_cont_encodeSnappyBetterBlockAsm: + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + 
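+	// encodeSnappyBetterBlockAsm keeps two hash tables on the stack: the load just
+	// above indexes a "long" table at 24(SP) through a 64-bit multiplicative hash
+	// (constant 0x00cf1bbcdcbfa563, SHLQ $0x08 / SHRQ $0x2f, i.e. a 17-bit index),
+	// and the load just below indexes a smaller "short" table at 524312(SP) through
+	// the golden-ratio hash 0x9e3779b1 (SHLQ $0x20 / SHRQ $0x32, a 14-bit index).
+	// Both entries are then overwritten with the current position CX, so each table
+	// always holds the most recent candidate for its hash.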
MOVL 524312(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 524312(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm + CMPQ R10, SI + JNE no_short_found_encodeSnappyBetterBlockAsm + MOVL DI, BX + JMP candidate_match_encodeSnappyBetterBlockAsm + +no_short_found_encodeSnappyBetterBlockAsm: + CMPL R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm + CMPL R10, SI + JEQ candidateS_match_encodeSnappyBetterBlockAsm + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm + +candidateS_match_encodeSnappyBetterBlockAsm: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x2f, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBetterBlockAsm + DECL CX + MOVL DI, BX + +candidate_match_encodeSnappyBetterBlockAsm: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm + +match_extend_back_loop_encodeSnappyBetterBlockAsm: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm + +match_extend_back_end_encodeSnappyBetterBlockAsm: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 5(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm + +matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm: + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeSnappyBetterBlockAsm + +matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm: + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 + JAE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm + JB match_nolit_end_encodeSnappyBetterBlockAsm + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeSnappyBetterBlockAsm + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeSnappyBetterBlockAsm + LEAL 1(R11), R11 + +match_nolit_end_encodeSnappyBetterBlockAsm: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + CMPL R11, $0x01 + JA match_length_ok_encodeSnappyBetterBlockAsm + CMPL DI, $0x0000ffff + JBE match_length_ok_encodeSnappyBetterBlockAsm + MOVL 20(SP), CX + INCL CX + JMP search_loop_encodeSnappyBetterBlockAsm + 
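+	// The check above rejects a marginal match: when the extra matched length in
+	// R11 is at most 1 and the offset in DI does not fit in 16 bits, encoding it
+	// would cost a 5-byte copy-4 tag, so the search resumes instead. The matchLen
+	// loop further up compares 8 bytes at a time and finds the first differing
+	// byte from the trailing zero bits of the XOR (TZCNT under GOAMD64_v3, BSF
+	// otherwise). A rough Go equivalent, as an illustrative sketch only (uses
+	// encoding/binary and math/bits; this is not the generator source):
+	//
+	//	func matchLen(a, b []byte) (n int) { // assumes len(b) >= len(a)
+	//		for len(a) >= 8 {
+	//			d := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
+	//			if d != 0 {
+	//				return n + bits.TrailingZeros64(d)>>3
+	//			}
+	//			a, b, n = a[8:], b[8:], n+8
+	//		}
+	//		for i := range a { // tail: the asm unrolls this as 4/2/1-byte steps
+	//			if a[i] != b[i] {
+	//				break
+	//			}
+	//			n++
+	//		}
+	//		return n
+	//	}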
+match_length_ok_encodeSnappyBetterBlockAsm: + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm + CMPL BX, $0x00010000 + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm + CMPL BX, $0x01000000 + JB four_bytes_match_emit_encodeSnappyBetterBlockAsm + MOVB $0xfc, (AX) + MOVL BX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +four_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVL BX, R10 + SHRL $0x10, R10 + MOVB $0xf8, (AX) + MOVW BX, 1(AX) + MOVB R10, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +three_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +two_bytes_match_emit_encodeSnappyBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm + +one_byte_match_emit_encodeSnappyBetterBlockAsm: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm + +memmove_long_match_emit_encodeSnappyBetterBlockAsm: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA 
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy + CMPL DI, $0x00010000 + JB two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm + +four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R11, $0x40 + JBE four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm + MOVB $0xff, (AX) + MOVL DI, 1(AX) + LEAL -64(R11), R11 + ADDQ $0x05, AX + CMPL R11, $0x04 + JB four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm + JMP four_bytes_loop_back_match_nolit_encodeSnappyBetterBlockAsm + +four_bytes_remain_match_nolit_encodeSnappyBetterBlockAsm: + TESTL R11, R11 + JZ match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + XORL BX, BX + LEAL -1(BX)(R11*4), R11 + MOVB R11, (AX) + MOVL DI, 1(AX) + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm: + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm + CMPL DI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm: + MOVQ $0x00cf1bbcdcbfa563, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x32, R10 + SHLQ $0x08, R11 + IMULQ BX, R11 + SHRQ $0x2f, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x32, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 524312(SP)(R10*4) + MOVL R13, 524312(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeSnappyBetterBlockAsm: + CMPQ DI, R8 + JAE search_loop_encodeSnappyBetterBlockAsm + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x2f, R9 + SHLQ $0x08, R10 + IMULQ BX, R10 + SHRQ $0x2f, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeSnappyBetterBlockAsm + +emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 
5(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x00010000 + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm + CMPL DX, $0x01000000 + JB four_bytes_emit_remainder_encodeSnappyBetterBlockAsm + MOVB $0xfc, (AX) + MOVL DX, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +four_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVL DX, BX + SHRL $0x10, BX + MOVB $0xf8, (AX) + MOVW DX, 1(AX) + MOVB BL, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), 
X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsmlarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm64K(SB), $327704-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000a00, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm64K: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm64K + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm64K: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x07, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm64K + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x00cf1bbcdcbfa563, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x30, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x32, R10 + MOVL 24(SP)(R9*4), BX + MOVL 262168(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 262168(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm64K + CMPQ R10, SI + JNE no_short_found_encodeSnappyBetterBlockAsm64K + MOVL DI, BX + JMP candidate_match_encodeSnappyBetterBlockAsm64K + +no_short_found_encodeSnappyBetterBlockAsm64K: + CMPL R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm64K + CMPL R10, SI + JEQ candidateS_match_encodeSnappyBetterBlockAsm64K + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm64K + +candidateS_match_encodeSnappyBetterBlockAsm64K: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x08, R9 + IMULQ R8, R9 + SHRQ $0x30, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ 
candidate_match_encodeSnappyBetterBlockAsm64K + DECL CX + MOVL DI, BX + +candidate_match_encodeSnappyBetterBlockAsm64K: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K + +match_extend_back_loop_encodeSnappyBetterBlockAsm64K: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm64K + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm64K + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm64K + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm64K + +match_extend_back_end_encodeSnappyBetterBlockAsm64K: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm64K: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K + +matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm64K: + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm64K + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeSnappyBetterBlockAsm64K + +matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm64K: + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 + JAE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm64K + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K + JB match_nolit_end_encodeSnappyBetterBlockAsm64K + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeSnappyBetterBlockAsm64K + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeSnappyBetterBlockAsm64K + LEAL 1(R11), R11 + +match_nolit_end_encodeSnappyBetterBlockAsm64K: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm64K + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm64K + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm64K + +three_bytes_match_emit_encodeSnappyBetterBlockAsm64K: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K + +two_bytes_match_emit_encodeSnappyBetterBlockAsm64K: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm64K + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm64K + +one_byte_match_emit_encodeSnappyBetterBlockAsm64K: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + 
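+	// Snappy literal tags: a run of up to 60 bytes encodes its length directly in
+	// the tag byte ((len-1)<<2, the SHLB $0x02 above), 0xf0 marks a one-byte
+	// length extension and 0xf4 a two-byte one. The back-to-back JB pair above
+	// (two_bytes then three_bytes on the same flags) looks like fallthrough code
+	// from the generator: in this 64K variant a literal run never needs more than
+	// two extra length bytes, so the second branch cannot be taken and control
+	// simply falls into three_bytes_match_emit either way.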
+memmove_match_emit_encodeSnappyBetterBlockAsm64K: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm64K_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm64K: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K + +memmove_long_match_emit_encodeSnappyBetterBlockAsm64K: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm64K: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K: + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm64K + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm64K: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K + CMPL DI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP 
match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm64K: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm64K: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm64K + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm64K: + MOVQ $0x00cf1bbcdcbfa563, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x30, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x32, R10 + SHLQ $0x08, R11 + IMULQ BX, R11 + SHRQ $0x30, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x32, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 262168(SP)(R10*4) + MOVL R13, 262168(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeSnappyBetterBlockAsm64K: + CMPQ DI, R8 + JAE search_loop_encodeSnappyBetterBlockAsm64K + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x08, R9 + IMULQ BX, R9 + SHRQ $0x30, R9 + SHLQ $0x08, R10 + IMULQ BX, R10 + SHRQ $0x30, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeSnappyBetterBlockAsm64K + +emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm64K + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm64K: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm64K + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm64K: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm64K: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB 
SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm64K_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64K: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm64Klarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm12B(SB), $81944-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000280, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm12B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm12B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + 
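+	// Prologue shared by the encodeSnappyBetterBlockAsm* variants: zero_loop
+	// clears both on-stack hash tables with 128-byte SSE stores (0x280 iterations
+	// x 128 = 81920 bytes for this 12B variant), 8(SP) receives src_len-8 as the
+	// last position worth searching from, and (SP) receives a destination budget
+	// of roughly dst_base + src_len - 9 - src_len/32; whenever the output cursor
+	// would reach it, the function bails out and returns 0. The 64K/12B/10B
+	// variants appear to differ only in table sizes, the hash shift amounts that
+	// index them, and the skip rate of the search loop.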
+search_loop_encodeSnappyBetterBlockAsm12B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x06, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm12B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x34, R10 + MOVL 24(SP)(R9*4), BX + MOVL 65560(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 65560(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + CMPQ R10, SI + JNE no_short_found_encodeSnappyBetterBlockAsm12B + MOVL DI, BX + JMP candidate_match_encodeSnappyBetterBlockAsm12B + +no_short_found_encodeSnappyBetterBlockAsm12B: + CMPL R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + CMPL R10, SI + JEQ candidateS_match_encodeSnappyBetterBlockAsm12B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm12B + +candidateS_match_encodeSnappyBetterBlockAsm12B: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x32, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBetterBlockAsm12B + DECL CX + MOVL DI, BX + +candidate_match_encodeSnappyBetterBlockAsm12B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B + +match_extend_back_loop_encodeSnappyBetterBlockAsm12B: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm12B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm12B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm12B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm12B + +match_extend_back_end_encodeSnappyBetterBlockAsm12B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm12B: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B + +matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm12B: + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm12B + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeSnappyBetterBlockAsm12B + +matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm12B: + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 + JAE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm12B + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B + JB match_nolit_end_encodeSnappyBetterBlockAsm12B + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ 
match_nolit_end_encodeSnappyBetterBlockAsm12B + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeSnappyBetterBlockAsm12B + LEAL 1(R11), R11 + +match_nolit_end_encodeSnappyBetterBlockAsm12B: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm12B + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm12B + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm12B + +three_bytes_match_emit_encodeSnappyBetterBlockAsm12B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm12B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm12B + +one_byte_match_emit_encodeSnappyBetterBlockAsm12B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm12B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm12B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm12B: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm12B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA 
emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm12B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B: + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm12B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm12B: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B + CMPL DI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm12B: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm12B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm12B + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm12B: + MOVQ $0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x32, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x34, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x32, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x34, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 65560(SP)(R10*4) + MOVL R13, 65560(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeSnappyBetterBlockAsm12B: + CMPQ DI, R8 + JAE search_loop_encodeSnappyBetterBlockAsm12B + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x32, R9 + SHLQ $0x10, R10 + IMULQ BX, R10 + SHRQ $0x32, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeSnappyBetterBlockAsm12B + +emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm12B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm12B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVB 
$0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm12B + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm12B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm12B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA 
emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm12Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm10B(SB), $20504-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x000000a0, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm10B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm10B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm10B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm10B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x36, R10 + MOVL 24(SP)(R9*4), BX + MOVL 16408(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 16408(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + CMPQ R10, SI + JNE no_short_found_encodeSnappyBetterBlockAsm10B + MOVL DI, BX + JMP candidate_match_encodeSnappyBetterBlockAsm10B + +no_short_found_encodeSnappyBetterBlockAsm10B: + CMPL R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + CMPL R10, SI + JEQ candidateS_match_encodeSnappyBetterBlockAsm10B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm10B + +candidateS_match_encodeSnappyBetterBlockAsm10B: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x34, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBetterBlockAsm10B + DECL CX + MOVL DI, BX + +candidate_match_encodeSnappyBetterBlockAsm10B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B + +match_extend_back_loop_encodeSnappyBetterBlockAsm10B: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm10B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm10B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm10B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm10B + +match_extend_back_end_encodeSnappyBetterBlockAsm10B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm10B: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ 
src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B + +matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm10B: + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm10B + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeSnappyBetterBlockAsm10B + +matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm10B: + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 + JAE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm10B + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B + JB match_nolit_end_encodeSnappyBetterBlockAsm10B + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeSnappyBetterBlockAsm10B + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeSnappyBetterBlockAsm10B + LEAL 1(R11), R11 + +match_nolit_end_encodeSnappyBetterBlockAsm10B: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm10B + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm10B + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm10B + +three_bytes_match_emit_encodeSnappyBetterBlockAsm10B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm10B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm10B + +one_byte_match_emit_encodeSnappyBetterBlockAsm10B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm10B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + 
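+	// The genMemMoveLong block a few cases below handles literal runs longer than
+	// 64 bytes: X0-X3 snapshot the first and last 32 bytes up front, the main loop
+	// then copies 32-byte chunks with stores aligned to the destination (ANDL
+	// $0x1f on AX computes the misalignment, and MOVOA requires aligned
+	// addresses), and the saved head/tail vectors are stored last to patch the
+	// unaligned edges.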
+emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm10B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm10B: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm10B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm10B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B: + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm10B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm10B: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B + CMPL DI, $0x00000800 + JAE emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm10B: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm10B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm10B + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm10B: + MOVQ $0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x34, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x36, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x34, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x36, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 
24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 16408(SP)(R10*4) + MOVL R13, 16408(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeSnappyBetterBlockAsm10B: + CMPQ DI, R8 + JAE search_loop_encodeSnappyBetterBlockAsm10B + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x34, R9 + SHLQ $0x10, R10 + IMULQ BX, R10 + SHRQ $0x34, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeSnappyBetterBlockAsm10B + +emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm10B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm10B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm10B + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm10B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32 + JMP emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU 
-16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm10B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm10Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int +// Requires: BMI, SSE2 +TEXT ·encodeSnappyBetterBlockAsm8B(SB), $5144-56 + MOVQ dst_base+0(FP), AX + MOVQ $0x00000028, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_encodeSnappyBetterBlockAsm8B: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_encodeSnappyBetterBlockAsm8B + MOVL $0x00000000, 12(SP) + MOVQ src_len+32(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL $0x00000000, 16(SP) + MOVQ src_base+24(FP), DX + +search_loop_encodeSnappyBetterBlockAsm8B: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x04, BX + LEAL 1(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm8B + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ $0x9e3779b1, BX + MOVQ SI, R9 + MOVQ SI, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + SHLQ $0x20, R10 + IMULQ BX, R10 + SHRQ $0x38, R10 + MOVL 24(SP)(R9*4), BX + MOVL 4120(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + MOVL CX, 4120(SP)(R10*4) + MOVQ (DX)(BX*1), R9 + MOVQ (DX)(DI*1), R10 + CMPQ R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + CMPQ R10, SI + JNE no_short_found_encodeSnappyBetterBlockAsm8B + MOVL DI, BX + JMP candidate_match_encodeSnappyBetterBlockAsm8B + +no_short_found_encodeSnappyBetterBlockAsm8B: + CMPL R9, SI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + CMPL R10, SI + JEQ 
candidateS_match_encodeSnappyBetterBlockAsm8B + MOVL 20(SP), CX + JMP search_loop_encodeSnappyBetterBlockAsm8B + +candidateS_match_encodeSnappyBetterBlockAsm8B: + SHRQ $0x08, SI + MOVQ SI, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x36, R9 + MOVL 24(SP)(R9*4), BX + INCL CX + MOVL CX, 24(SP)(R9*4) + CMPL (DX)(BX*1), SI + JEQ candidate_match_encodeSnappyBetterBlockAsm8B + DECL CX + MOVL DI, BX + +candidate_match_encodeSnappyBetterBlockAsm8B: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B + +match_extend_back_loop_encodeSnappyBetterBlockAsm8B: + CMPL CX, SI + JBE match_extend_back_end_encodeSnappyBetterBlockAsm8B + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_encodeSnappyBetterBlockAsm8B + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_encodeSnappyBetterBlockAsm8B + JMP match_extend_back_loop_encodeSnappyBetterBlockAsm8B + +match_extend_back_end_encodeSnappyBetterBlockAsm8B: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_dst_size_check_encodeSnappyBetterBlockAsm8B: + MOVL CX, SI + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+32(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), R9 + + // matchLen + XORL R11, R11 + CMPL DI, $0x08 + JB matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B + +matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm8B: + MOVQ (R8)(R11*1), R10 + XORQ (R9)(R11*1), R10 + TESTQ R10, R10 + JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm8B + +#ifdef GOAMD64_v3 + TZCNTQ R10, R10 + +#else + BSFQ R10, R10 + +#endif + SARQ $0x03, R10 + LEAL (R11)(R10*1), R11 + JMP match_nolit_end_encodeSnappyBetterBlockAsm8B + +matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm8B: + LEAL -8(DI), DI + LEAL 8(R11), R11 + CMPL DI, $0x08 + JAE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm8B + +matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL DI, $0x04 + JB matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B + MOVL (R8)(R11*1), R10 + CMPL (R9)(R11*1), R10 + JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B + LEAL -4(DI), DI + LEAL 4(R11), R11 + +matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL DI, $0x01 + JE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B + JB match_nolit_end_encodeSnappyBetterBlockAsm8B + MOVW (R8)(R11*1), R10 + CMPW (R9)(R11*1), R10 + JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B + LEAL 2(R11), R11 + SUBL $0x02, DI + JZ match_nolit_end_encodeSnappyBetterBlockAsm8B + +matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B: + MOVB (R8)(R11*1), R10 + CMPB (R9)(R11*1), R10 + JNE match_nolit_end_encodeSnappyBetterBlockAsm8B + LEAL 1(R11), R11 + +match_nolit_end_encodeSnappyBetterBlockAsm8B: + MOVL CX, DI + SUBL BX, DI + + // Check if repeat + MOVL DI, 16(SP) + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R9 + SUBL BX, R8 + LEAL -1(R8), BX + CMPL BX, $0x3c + JB one_byte_match_emit_encodeSnappyBetterBlockAsm8B + CMPL BX, $0x00000100 + JB two_bytes_match_emit_encodeSnappyBetterBlockAsm8B + JB three_bytes_match_emit_encodeSnappyBetterBlockAsm8B + +three_bytes_match_emit_encodeSnappyBetterBlockAsm8B: + MOVB $0xf4, (AX) + MOVW BX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B + +two_bytes_match_emit_encodeSnappyBetterBlockAsm8B: + 
MOVB $0xf0, (AX) + MOVB BL, 1(AX) + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_match_emit_encodeSnappyBetterBlockAsm8B + JMP memmove_long_match_emit_encodeSnappyBetterBlockAsm8B + +one_byte_match_emit_encodeSnappyBetterBlockAsm8B: + SHLB $0x02, BL + MOVB BL, (AX) + ADDQ $0x01, AX + +memmove_match_emit_encodeSnappyBetterBlockAsm8B: + LEAQ (AX)(R8*1), BX + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32 + JMP emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8: + MOVQ (R9), R10 + MOVQ R10, (AX) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_8through16: + MOVQ (R9), R10 + MOVQ -8(R9)(R8*1), R9 + MOVQ R10, (AX) + MOVQ R9, -8(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_17through32: + MOVOU (R9), X0 + MOVOU -16(R9)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_match_emit_encodeSnappyBetterBlockAsm8B_memmove_move_33through64: + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_match_emit_encodeSnappyBetterBlockAsm8B: + MOVQ BX, AX + JMP emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B + +memmove_long_match_emit_encodeSnappyBetterBlockAsm8B: + LEAQ (AX)(R8*1), BX + + // genMemMoveLong + MOVOU (R9), X0 + MOVOU 16(R9), X1 + MOVOU -32(R9)(R8*1), X2 + MOVOU -16(R9)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R10 + ANDL $0x0000001f, R10 + MOVQ $0x00000040, R13 + SUBQ R10, R13 + DECQ R12 + JA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(R9)(R13*1), R10 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back: + MOVOU (R10), X4 + MOVOU 16(R10), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R10 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(R9)(R13*1), X4 + MOVOU -16(R9)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_match_emit_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ BX, AX + +emit_literal_done_match_emit_encodeSnappyBetterBlockAsm8B: + ADDL R11, CX + ADDL $0x04, R11 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B: + CMPL R11, $0x40 + JBE two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B + MOVB $0xee, (AX) + MOVW DI, 1(AX) + LEAL -60(R11), R11 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_encodeSnappyBetterBlockAsm8B + +two_byte_offset_short_match_nolit_encodeSnappyBetterBlockAsm8B: + MOVL R11, BX + SHLL $0x02, BX + CMPL R11, $0x0c + JAE 
emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B + LEAL -15(BX), BX + MOVB DI, 1(AX) + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, BX + MOVB BL, (AX) + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B + +emit_copy_three_match_nolit_encodeSnappyBetterBlockAsm8B: + LEAL -2(BX), BX + MOVB BL, (AX) + MOVW DI, 1(AX) + ADDQ $0x03, AX + +match_nolit_emitcopy_end_encodeSnappyBetterBlockAsm8B: + CMPL CX, 8(SP) + JAE emit_remainder_encodeSnappyBetterBlockAsm8B + CMPQ AX, (SP) + JB match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +match_nolit_dst_ok_encodeSnappyBetterBlockAsm8B: + MOVQ $0x0000cf1bbcdcbf9b, BX + MOVQ $0x9e3779b1, DI + LEAQ 1(SI), SI + LEAQ -2(CX), R8 + MOVQ (DX)(SI*1), R9 + MOVQ 1(DX)(SI*1), R10 + MOVQ (DX)(R8*1), R11 + MOVQ 1(DX)(R8*1), R12 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x36, R9 + SHLQ $0x20, R10 + IMULQ DI, R10 + SHRQ $0x38, R10 + SHLQ $0x10, R11 + IMULQ BX, R11 + SHRQ $0x36, R11 + SHLQ $0x20, R12 + IMULQ DI, R12 + SHRQ $0x38, R12 + LEAQ 1(SI), DI + LEAQ 1(R8), R13 + MOVL SI, 24(SP)(R9*4) + MOVL R8, 24(SP)(R11*4) + MOVL DI, 4120(SP)(R10*4) + MOVL R13, 4120(SP)(R12*4) + LEAQ 1(R8)(SI*1), DI + SHRQ $0x01, DI + ADDQ $0x01, SI + SUBQ $0x01, R8 + +index_loop_encodeSnappyBetterBlockAsm8B: + CMPQ DI, R8 + JAE search_loop_encodeSnappyBetterBlockAsm8B + MOVQ (DX)(SI*1), R9 + MOVQ (DX)(DI*1), R10 + SHLQ $0x10, R9 + IMULQ BX, R9 + SHRQ $0x36, R9 + SHLQ $0x10, R10 + IMULQ BX, R10 + SHRQ $0x36, R10 + MOVL SI, 24(SP)(R9*4) + MOVL DI, 24(SP)(R10*4) + ADDQ $0x02, SI + ADDQ $0x02, DI + JMP index_loop_encodeSnappyBetterBlockAsm8B + +emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ src_len+32(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_encodeSnappyBetterBlockAsm8B + MOVQ $0x00000000, ret+48(FP) + RET + +emit_remainder_ok_encodeSnappyBetterBlockAsm8B: + MOVQ src_len+32(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), DX + CMPL DX, $0x3c + JB one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B + CMPL DX, $0x00000100 + JB two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B + JB three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B + +three_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVB $0xf4, (AX) + MOVW DX, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B + +two_bytes_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVB $0xf0, (AX) + MOVB DL, 1(AX) + ADDQ $0x02, AX + CMPL DX, $0x40 + JB memmove_emit_remainder_encodeSnappyBetterBlockAsm8B + JMP memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B + +one_byte_emit_remainder_encodeSnappyBetterBlockAsm8B: + SHLB $0x02, DL + MOVB DL, (AX) + ADDQ $0x01, AX + +memmove_emit_remainder_encodeSnappyBetterBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveShort + CMPQ BX, $0x03 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2 + JE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3 + CMPQ BX, $0x08 + JB emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7 + CMPQ BX, $0x10 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16 + CMPQ BX, $0x20 + JBE emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32 + JMP 
emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64 + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(BX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(BX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(BX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(BX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(BX*1) + JMP memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B + +emit_lit_memmove_emit_remainder_encodeSnappyBetterBlockAsm8B_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + +memmove_end_copy_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ DX, AX + JMP emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B + +memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8B: + LEAQ (AX)(SI*1), DX + MOVL SI, BX + + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(BX*1), X2 + MOVOU -16(CX)(BX*1), X3 + MOVQ BX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_big_loop_back + +emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ BX, R8 + JAE emit_lit_memmove_long_emit_remainder_encodeSnappyBetterBlockAsm8Blarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(BX*1) + MOVOU X3, -16(AX)(BX*1) + MOVQ DX, AX + +emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm8B: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ AX, ret+48(FP) + RET + +// func calcBlockSize(src []byte) int +// Requires: BMI, SSE2 +TEXT ·calcBlockSize(SB), $32792-32 + XORQ AX, AX + MOVQ $0x00000100, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_calcBlockSize: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_calcBlockSize + MOVL $0x00000000, 12(SP) + MOVQ src_len+8(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ 
(AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+0(FP), DX + +search_loop_calcBlockSize: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x05, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_calcBlockSize + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x33, R9 + SHLQ $0x10, R10 + IMULQ R8, R10 + SHRQ $0x33, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x10, R9 + IMULQ R8, R9 + SHRQ $0x33, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_calcBlockSize + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI + JZ repeat_extend_back_end_calcBlockSize + +repeat_extend_back_loop_calcBlockSize: + CMPL SI, BX + JBE repeat_extend_back_end_calcBlockSize + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_calcBlockSize + LEAL -1(SI), SI + DECL DI + JNZ repeat_extend_back_loop_calcBlockSize + +repeat_extend_back_end_calcBlockSize: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_calcBlockSize + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_calcBlockSize + CMPL BX, $0x00000100 + JB two_bytes_repeat_emit_calcBlockSize + CMPL BX, $0x00010000 + JB three_bytes_repeat_emit_calcBlockSize + CMPL BX, $0x01000000 + JB four_bytes_repeat_emit_calcBlockSize + ADDQ $0x05, AX + JMP memmove_long_repeat_emit_calcBlockSize + +four_bytes_repeat_emit_calcBlockSize: + ADDQ $0x04, AX + JMP memmove_long_repeat_emit_calcBlockSize + +three_bytes_repeat_emit_calcBlockSize: + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_calcBlockSize + +two_bytes_repeat_emit_calcBlockSize: + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_calcBlockSize + JMP memmove_long_repeat_emit_calcBlockSize + +one_byte_repeat_emit_calcBlockSize: + ADDQ $0x01, AX + +memmove_repeat_emit_calcBlockSize: + LEAQ (AX)(DI*1), AX + JMP emit_literal_done_repeat_emit_calcBlockSize + +memmove_long_repeat_emit_calcBlockSize: + LEAQ (AX)(DI*1), AX + +emit_literal_done_repeat_emit_calcBlockSize: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+8(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JB matchlen_match4_repeat_extend_calcBlockSize + +matchlen_loopback_repeat_extend_calcBlockSize: + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_repeat_extend_calcBlockSize + +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP repeat_extend_forward_end_calcBlockSize + +matchlen_loop_repeat_extend_calcBlockSize: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JAE matchlen_loopback_repeat_extend_calcBlockSize + +matchlen_match4_repeat_extend_calcBlockSize: + CMPL DI, $0x04 + JB matchlen_match2_repeat_extend_calcBlockSize + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 + JNE matchlen_match2_repeat_extend_calcBlockSize + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_repeat_extend_calcBlockSize: + CMPL DI, $0x01 + JE matchlen_match1_repeat_extend_calcBlockSize + JB repeat_extend_forward_end_calcBlockSize + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 + JNE matchlen_match1_repeat_extend_calcBlockSize 
+ LEAL 2(R10), R10 + SUBL $0x02, DI + JZ repeat_extend_forward_end_calcBlockSize + +matchlen_match1_repeat_extend_calcBlockSize: + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 + JNE repeat_extend_forward_end_calcBlockSize + LEAL 1(R10), R10 + +repeat_extend_forward_end_calcBlockSize: + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + + // emitCopy + CMPL SI, $0x00010000 + JB two_byte_offset_repeat_as_copy_calcBlockSize + +four_bytes_loop_back_repeat_as_copy_calcBlockSize: + CMPL BX, $0x40 + JBE four_bytes_remain_repeat_as_copy_calcBlockSize + LEAL -64(BX), BX + ADDQ $0x05, AX + CMPL BX, $0x04 + JB four_bytes_remain_repeat_as_copy_calcBlockSize + JMP four_bytes_loop_back_repeat_as_copy_calcBlockSize + +four_bytes_remain_repeat_as_copy_calcBlockSize: + TESTL BX, BX + JZ repeat_end_emit_calcBlockSize + XORL BX, BX + ADDQ $0x05, AX + JMP repeat_end_emit_calcBlockSize + +two_byte_offset_repeat_as_copy_calcBlockSize: + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_calcBlockSize + LEAL -60(BX), BX + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_calcBlockSize + +two_byte_offset_short_repeat_as_copy_calcBlockSize: + MOVL BX, DI + SHLL $0x02, DI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_calcBlockSize + CMPL SI, $0x00000800 + JAE emit_copy_three_repeat_as_copy_calcBlockSize + ADDQ $0x02, AX + JMP repeat_end_emit_calcBlockSize + +emit_copy_three_repeat_as_copy_calcBlockSize: + ADDQ $0x03, AX + +repeat_end_emit_calcBlockSize: + MOVL CX, 12(SP) + JMP search_loop_calcBlockSize + +no_repeat_found_calcBlockSize: + CMPL (DX)(BX*1), SI + JEQ candidate_match_calcBlockSize + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_calcBlockSize + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_calcBlockSize + MOVL 20(SP), CX + JMP search_loop_calcBlockSize + +candidate3_match_calcBlockSize: + ADDL $0x02, CX + JMP candidate_match_calcBlockSize + +candidate2_match_calcBlockSize: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_calcBlockSize: + MOVL 12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_calcBlockSize + +match_extend_back_loop_calcBlockSize: + CMPL CX, SI + JBE match_extend_back_end_calcBlockSize + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_calcBlockSize + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_calcBlockSize + JMP match_extend_back_loop_calcBlockSize + +match_extend_back_end_calcBlockSize: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 5(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_calcBlockSize + MOVQ $0x00000000, ret+24(FP) + RET + +match_dst_size_check_calcBlockSize: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_calcBlockSize + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JB one_byte_match_emit_calcBlockSize + CMPL SI, $0x00000100 + JB two_bytes_match_emit_calcBlockSize + CMPL SI, $0x00010000 + JB three_bytes_match_emit_calcBlockSize + CMPL SI, $0x01000000 + JB four_bytes_match_emit_calcBlockSize + ADDQ $0x05, AX + JMP memmove_long_match_emit_calcBlockSize + +four_bytes_match_emit_calcBlockSize: + ADDQ $0x04, AX + JMP memmove_long_match_emit_calcBlockSize + +three_bytes_match_emit_calcBlockSize: + ADDQ $0x03, AX + JMP memmove_long_match_emit_calcBlockSize + +two_bytes_match_emit_calcBlockSize: + ADDQ $0x02, AX + CMPL SI, $0x40 + JB memmove_match_emit_calcBlockSize + JMP memmove_long_match_emit_calcBlockSize 
+ +one_byte_match_emit_calcBlockSize: + ADDQ $0x01, AX + +memmove_match_emit_calcBlockSize: + LEAQ (AX)(R8*1), AX + JMP emit_literal_done_match_emit_calcBlockSize + +memmove_long_match_emit_calcBlockSize: + LEAQ (AX)(R8*1), AX + +emit_literal_done_match_emit_calcBlockSize: +match_nolit_loop_calcBlockSize: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+8(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_calcBlockSize + +matchlen_loopback_match_nolit_calcBlockSize: + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 + JZ matchlen_loop_match_nolit_calcBlockSize + +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_calcBlockSize + +matchlen_loop_match_nolit_calcBlockSize: + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 + JAE matchlen_loopback_match_nolit_calcBlockSize + +matchlen_match4_match_nolit_calcBlockSize: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_calcBlockSize + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_calcBlockSize + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_calcBlockSize: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_calcBlockSize + JB match_nolit_end_calcBlockSize + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_calcBlockSize + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_calcBlockSize + +matchlen_match1_match_nolit_calcBlockSize: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_calcBlockSize + LEAL 1(R9), R9 + +match_nolit_end_calcBlockSize: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy + CMPL BX, $0x00010000 + JB two_byte_offset_match_nolit_calcBlockSize + +four_bytes_loop_back_match_nolit_calcBlockSize: + CMPL R9, $0x40 + JBE four_bytes_remain_match_nolit_calcBlockSize + LEAL -64(R9), R9 + ADDQ $0x05, AX + CMPL R9, $0x04 + JB four_bytes_remain_match_nolit_calcBlockSize + JMP four_bytes_loop_back_match_nolit_calcBlockSize + +four_bytes_remain_match_nolit_calcBlockSize: + TESTL R9, R9 + JZ match_nolit_emitcopy_end_calcBlockSize + XORL BX, BX + ADDQ $0x05, AX + JMP match_nolit_emitcopy_end_calcBlockSize + +two_byte_offset_match_nolit_calcBlockSize: + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_calcBlockSize + LEAL -60(R9), R9 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_calcBlockSize + +two_byte_offset_short_match_nolit_calcBlockSize: + MOVL R9, SI + SHLL $0x02, SI + CMPL R9, $0x0c + JAE emit_copy_three_match_nolit_calcBlockSize + CMPL BX, $0x00000800 + JAE emit_copy_three_match_nolit_calcBlockSize + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_calcBlockSize + +emit_copy_three_match_nolit_calcBlockSize: + ADDQ $0x03, AX + +match_nolit_emitcopy_end_calcBlockSize: + CMPL CX, 8(SP) + JAE emit_remainder_calcBlockSize + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_calcBlockSize + MOVQ $0x00000000, ret+24(FP) + RET + +match_nolit_dst_ok_calcBlockSize: + MOVQ $0x0000cf1bbcdcbf9b, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x10, DI + IMULQ R8, DI + SHRQ $0x33, DI + SHLQ $0x10, BX + IMULQ R8, BX + SHRQ $0x33, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_calcBlockSize + INCL CX + JMP search_loop_calcBlockSize + +emit_remainder_calcBlockSize: + MOVQ 
src_len+8(FP), CX + SUBL 12(SP), CX + LEAQ 5(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_calcBlockSize + MOVQ $0x00000000, ret+24(FP) + RET + +emit_remainder_ok_calcBlockSize: + MOVQ src_len+8(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_calcBlockSize + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), CX + CMPL CX, $0x3c + JB one_byte_emit_remainder_calcBlockSize + CMPL CX, $0x00000100 + JB two_bytes_emit_remainder_calcBlockSize + CMPL CX, $0x00010000 + JB three_bytes_emit_remainder_calcBlockSize + CMPL CX, $0x01000000 + JB four_bytes_emit_remainder_calcBlockSize + ADDQ $0x05, AX + JMP memmove_long_emit_remainder_calcBlockSize + +four_bytes_emit_remainder_calcBlockSize: + ADDQ $0x04, AX + JMP memmove_long_emit_remainder_calcBlockSize + +three_bytes_emit_remainder_calcBlockSize: + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_calcBlockSize + +two_bytes_emit_remainder_calcBlockSize: + ADDQ $0x02, AX + CMPL CX, $0x40 + JB memmove_emit_remainder_calcBlockSize + JMP memmove_long_emit_remainder_calcBlockSize + +one_byte_emit_remainder_calcBlockSize: + ADDQ $0x01, AX + +memmove_emit_remainder_calcBlockSize: + LEAQ (AX)(SI*1), AX + JMP emit_literal_done_emit_remainder_calcBlockSize + +memmove_long_emit_remainder_calcBlockSize: + LEAQ (AX)(SI*1), AX + +emit_literal_done_emit_remainder_calcBlockSize: + MOVQ AX, ret+24(FP) + RET + +// func calcBlockSizeSmall(src []byte) int +// Requires: BMI, SSE2 +TEXT ·calcBlockSizeSmall(SB), $2072-32 + XORQ AX, AX + MOVQ $0x00000010, CX + LEAQ 24(SP), DX + PXOR X0, X0 + +zero_loop_calcBlockSizeSmall: + MOVOU X0, (DX) + MOVOU X0, 16(DX) + MOVOU X0, 32(DX) + MOVOU X0, 48(DX) + MOVOU X0, 64(DX) + MOVOU X0, 80(DX) + MOVOU X0, 96(DX) + MOVOU X0, 112(DX) + ADDQ $0x80, DX + DECQ CX + JNZ zero_loop_calcBlockSizeSmall + MOVL $0x00000000, 12(SP) + MOVQ src_len+8(FP), CX + LEAQ -9(CX), DX + LEAQ -8(CX), BX + MOVL BX, 8(SP) + SHRQ $0x05, CX + SUBL CX, DX + LEAQ (AX)(DX*1), DX + MOVQ DX, (SP) + MOVL $0x00000001, CX + MOVL CX, 16(SP) + MOVQ src_base+0(FP), DX + +search_loop_calcBlockSizeSmall: + MOVL CX, BX + SUBL 12(SP), BX + SHRL $0x04, BX + LEAL 4(CX)(BX*1), BX + CMPL BX, 8(SP) + JAE emit_remainder_calcBlockSizeSmall + MOVQ (DX)(CX*1), SI + MOVL BX, 20(SP) + MOVQ $0x9e3779b1, R8 + MOVQ SI, R9 + MOVQ SI, R10 + SHRQ $0x08, R10 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x37, R9 + SHLQ $0x20, R10 + IMULQ R8, R10 + SHRQ $0x37, R10 + MOVL 24(SP)(R9*4), BX + MOVL 24(SP)(R10*4), DI + MOVL CX, 24(SP)(R9*4) + LEAL 1(CX), R9 + MOVL R9, 24(SP)(R10*4) + MOVQ SI, R9 + SHRQ $0x10, R9 + SHLQ $0x20, R9 + IMULQ R8, R9 + SHRQ $0x37, R9 + MOVL CX, R8 + SUBL 16(SP), R8 + MOVL 1(DX)(R8*1), R10 + MOVQ SI, R8 + SHRQ $0x08, R8 + CMPL R8, R10 + JNE no_repeat_found_calcBlockSizeSmall + LEAL 1(CX), SI + MOVL 12(SP), BX + MOVL SI, DI + SUBL 16(SP), DI + JZ repeat_extend_back_end_calcBlockSizeSmall + +repeat_extend_back_loop_calcBlockSizeSmall: + CMPL SI, BX + JBE repeat_extend_back_end_calcBlockSizeSmall + MOVB -1(DX)(DI*1), R8 + MOVB -1(DX)(SI*1), R9 + CMPB R8, R9 + JNE repeat_extend_back_end_calcBlockSizeSmall + LEAL -1(SI), SI + DECL DI + JNZ repeat_extend_back_loop_calcBlockSizeSmall + +repeat_extend_back_end_calcBlockSizeSmall: + MOVL 12(SP), BX + CMPL BX, SI + JEQ emit_literal_done_repeat_emit_calcBlockSizeSmall + MOVL SI, DI + MOVL SI, 12(SP) + LEAQ (DX)(BX*1), R8 + SUBL BX, DI + LEAL -1(DI), BX + CMPL BX, $0x3c + JB one_byte_repeat_emit_calcBlockSizeSmall + CMPL BX, $0x00000100 + JB 
two_bytes_repeat_emit_calcBlockSizeSmall + JB three_bytes_repeat_emit_calcBlockSizeSmall + +three_bytes_repeat_emit_calcBlockSizeSmall: + ADDQ $0x03, AX + JMP memmove_long_repeat_emit_calcBlockSizeSmall + +two_bytes_repeat_emit_calcBlockSizeSmall: + ADDQ $0x02, AX + CMPL BX, $0x40 + JB memmove_repeat_emit_calcBlockSizeSmall + JMP memmove_long_repeat_emit_calcBlockSizeSmall + +one_byte_repeat_emit_calcBlockSizeSmall: + ADDQ $0x01, AX + +memmove_repeat_emit_calcBlockSizeSmall: + LEAQ (AX)(DI*1), AX + JMP emit_literal_done_repeat_emit_calcBlockSizeSmall + +memmove_long_repeat_emit_calcBlockSizeSmall: + LEAQ (AX)(DI*1), AX + +emit_literal_done_repeat_emit_calcBlockSizeSmall: + ADDL $0x05, CX + MOVL CX, BX + SUBL 16(SP), BX + MOVQ src_len+8(FP), DI + SUBL CX, DI + LEAQ (DX)(CX*1), R8 + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R10, R10 + CMPL DI, $0x08 + JB matchlen_match4_repeat_extend_calcBlockSizeSmall + +matchlen_loopback_repeat_extend_calcBlockSizeSmall: + MOVQ (R8)(R10*1), R9 + XORQ (BX)(R10*1), R9 + TESTQ R9, R9 + JZ matchlen_loop_repeat_extend_calcBlockSizeSmall + +#ifdef GOAMD64_v3 + TZCNTQ R9, R9 + +#else + BSFQ R9, R9 + +#endif + SARQ $0x03, R9 + LEAL (R10)(R9*1), R10 + JMP repeat_extend_forward_end_calcBlockSizeSmall + +matchlen_loop_repeat_extend_calcBlockSizeSmall: + LEAL -8(DI), DI + LEAL 8(R10), R10 + CMPL DI, $0x08 + JAE matchlen_loopback_repeat_extend_calcBlockSizeSmall + +matchlen_match4_repeat_extend_calcBlockSizeSmall: + CMPL DI, $0x04 + JB matchlen_match2_repeat_extend_calcBlockSizeSmall + MOVL (R8)(R10*1), R9 + CMPL (BX)(R10*1), R9 + JNE matchlen_match2_repeat_extend_calcBlockSizeSmall + LEAL -4(DI), DI + LEAL 4(R10), R10 + +matchlen_match2_repeat_extend_calcBlockSizeSmall: + CMPL DI, $0x01 + JE matchlen_match1_repeat_extend_calcBlockSizeSmall + JB repeat_extend_forward_end_calcBlockSizeSmall + MOVW (R8)(R10*1), R9 + CMPW (BX)(R10*1), R9 + JNE matchlen_match1_repeat_extend_calcBlockSizeSmall + LEAL 2(R10), R10 + SUBL $0x02, DI + JZ repeat_extend_forward_end_calcBlockSizeSmall + +matchlen_match1_repeat_extend_calcBlockSizeSmall: + MOVB (R8)(R10*1), R9 + CMPB (BX)(R10*1), R9 + JNE repeat_extend_forward_end_calcBlockSizeSmall + LEAL 1(R10), R10 + +repeat_extend_forward_end_calcBlockSizeSmall: + ADDL R10, CX + MOVL CX, BX + SUBL SI, BX + MOVL 16(SP), SI + + // emitCopy +two_byte_offset_repeat_as_copy_calcBlockSizeSmall: + CMPL BX, $0x40 + JBE two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall + LEAL -60(BX), BX + ADDQ $0x03, AX + JMP two_byte_offset_repeat_as_copy_calcBlockSizeSmall + +two_byte_offset_short_repeat_as_copy_calcBlockSizeSmall: + MOVL BX, SI + SHLL $0x02, SI + CMPL BX, $0x0c + JAE emit_copy_three_repeat_as_copy_calcBlockSizeSmall + ADDQ $0x02, AX + JMP repeat_end_emit_calcBlockSizeSmall + +emit_copy_three_repeat_as_copy_calcBlockSizeSmall: + ADDQ $0x03, AX + +repeat_end_emit_calcBlockSizeSmall: + MOVL CX, 12(SP) + JMP search_loop_calcBlockSizeSmall + +no_repeat_found_calcBlockSizeSmall: + CMPL (DX)(BX*1), SI + JEQ candidate_match_calcBlockSizeSmall + SHRQ $0x08, SI + MOVL 24(SP)(R9*4), BX + LEAL 2(CX), R8 + CMPL (DX)(DI*1), SI + JEQ candidate2_match_calcBlockSizeSmall + MOVL R8, 24(SP)(R9*4) + SHRQ $0x08, SI + CMPL (DX)(BX*1), SI + JEQ candidate3_match_calcBlockSizeSmall + MOVL 20(SP), CX + JMP search_loop_calcBlockSizeSmall + +candidate3_match_calcBlockSizeSmall: + ADDL $0x02, CX + JMP candidate_match_calcBlockSizeSmall + +candidate2_match_calcBlockSizeSmall: + MOVL R8, 24(SP)(R9*4) + INCL CX + MOVL DI, BX + +candidate_match_calcBlockSizeSmall: + MOVL 
12(SP), SI + TESTL BX, BX + JZ match_extend_back_end_calcBlockSizeSmall + +match_extend_back_loop_calcBlockSizeSmall: + CMPL CX, SI + JBE match_extend_back_end_calcBlockSizeSmall + MOVB -1(DX)(BX*1), DI + MOVB -1(DX)(CX*1), R8 + CMPB DI, R8 + JNE match_extend_back_end_calcBlockSizeSmall + LEAL -1(CX), CX + DECL BX + JZ match_extend_back_end_calcBlockSizeSmall + JMP match_extend_back_loop_calcBlockSizeSmall + +match_extend_back_end_calcBlockSizeSmall: + MOVL CX, SI + SUBL 12(SP), SI + LEAQ 3(AX)(SI*1), SI + CMPQ SI, (SP) + JB match_dst_size_check_calcBlockSizeSmall + MOVQ $0x00000000, ret+24(FP) + RET + +match_dst_size_check_calcBlockSizeSmall: + MOVL CX, SI + MOVL 12(SP), DI + CMPL DI, SI + JEQ emit_literal_done_match_emit_calcBlockSizeSmall + MOVL SI, R8 + MOVL SI, 12(SP) + LEAQ (DX)(DI*1), SI + SUBL DI, R8 + LEAL -1(R8), SI + CMPL SI, $0x3c + JB one_byte_match_emit_calcBlockSizeSmall + CMPL SI, $0x00000100 + JB two_bytes_match_emit_calcBlockSizeSmall + JB three_bytes_match_emit_calcBlockSizeSmall + +three_bytes_match_emit_calcBlockSizeSmall: + ADDQ $0x03, AX + JMP memmove_long_match_emit_calcBlockSizeSmall + +two_bytes_match_emit_calcBlockSizeSmall: + ADDQ $0x02, AX + CMPL SI, $0x40 + JB memmove_match_emit_calcBlockSizeSmall + JMP memmove_long_match_emit_calcBlockSizeSmall + +one_byte_match_emit_calcBlockSizeSmall: + ADDQ $0x01, AX + +memmove_match_emit_calcBlockSizeSmall: + LEAQ (AX)(R8*1), AX + JMP emit_literal_done_match_emit_calcBlockSizeSmall + +memmove_long_match_emit_calcBlockSizeSmall: + LEAQ (AX)(R8*1), AX + +emit_literal_done_match_emit_calcBlockSizeSmall: +match_nolit_loop_calcBlockSizeSmall: + MOVL CX, SI + SUBL BX, SI + MOVL SI, 16(SP) + ADDL $0x04, CX + ADDL $0x04, BX + MOVQ src_len+8(FP), SI + SUBL CX, SI + LEAQ (DX)(CX*1), DI + LEAQ (DX)(BX*1), BX + + // matchLen + XORL R9, R9 + CMPL SI, $0x08 + JB matchlen_match4_match_nolit_calcBlockSizeSmall + +matchlen_loopback_match_nolit_calcBlockSizeSmall: + MOVQ (DI)(R9*1), R8 + XORQ (BX)(R9*1), R8 + TESTQ R8, R8 + JZ matchlen_loop_match_nolit_calcBlockSizeSmall + +#ifdef GOAMD64_v3 + TZCNTQ R8, R8 + +#else + BSFQ R8, R8 + +#endif + SARQ $0x03, R8 + LEAL (R9)(R8*1), R9 + JMP match_nolit_end_calcBlockSizeSmall + +matchlen_loop_match_nolit_calcBlockSizeSmall: + LEAL -8(SI), SI + LEAL 8(R9), R9 + CMPL SI, $0x08 + JAE matchlen_loopback_match_nolit_calcBlockSizeSmall + +matchlen_match4_match_nolit_calcBlockSizeSmall: + CMPL SI, $0x04 + JB matchlen_match2_match_nolit_calcBlockSizeSmall + MOVL (DI)(R9*1), R8 + CMPL (BX)(R9*1), R8 + JNE matchlen_match2_match_nolit_calcBlockSizeSmall + LEAL -4(SI), SI + LEAL 4(R9), R9 + +matchlen_match2_match_nolit_calcBlockSizeSmall: + CMPL SI, $0x01 + JE matchlen_match1_match_nolit_calcBlockSizeSmall + JB match_nolit_end_calcBlockSizeSmall + MOVW (DI)(R9*1), R8 + CMPW (BX)(R9*1), R8 + JNE matchlen_match1_match_nolit_calcBlockSizeSmall + LEAL 2(R9), R9 + SUBL $0x02, SI + JZ match_nolit_end_calcBlockSizeSmall + +matchlen_match1_match_nolit_calcBlockSizeSmall: + MOVB (DI)(R9*1), R8 + CMPB (BX)(R9*1), R8 + JNE match_nolit_end_calcBlockSizeSmall + LEAL 1(R9), R9 + +match_nolit_end_calcBlockSizeSmall: + ADDL R9, CX + MOVL 16(SP), BX + ADDL $0x04, R9 + MOVL CX, 12(SP) + + // emitCopy +two_byte_offset_match_nolit_calcBlockSizeSmall: + CMPL R9, $0x40 + JBE two_byte_offset_short_match_nolit_calcBlockSizeSmall + LEAL -60(R9), R9 + ADDQ $0x03, AX + JMP two_byte_offset_match_nolit_calcBlockSizeSmall + +two_byte_offset_short_match_nolit_calcBlockSizeSmall: + MOVL R9, BX + SHLL $0x02, BX + CMPL R9, $0x0c + JAE 
emit_copy_three_match_nolit_calcBlockSizeSmall + ADDQ $0x02, AX + JMP match_nolit_emitcopy_end_calcBlockSizeSmall + +emit_copy_three_match_nolit_calcBlockSizeSmall: + ADDQ $0x03, AX + +match_nolit_emitcopy_end_calcBlockSizeSmall: + CMPL CX, 8(SP) + JAE emit_remainder_calcBlockSizeSmall + MOVQ -2(DX)(CX*1), SI + CMPQ AX, (SP) + JB match_nolit_dst_ok_calcBlockSizeSmall + MOVQ $0x00000000, ret+24(FP) + RET + +match_nolit_dst_ok_calcBlockSizeSmall: + MOVQ $0x9e3779b1, R8 + MOVQ SI, DI + SHRQ $0x10, SI + MOVQ SI, BX + SHLQ $0x20, DI + IMULQ R8, DI + SHRQ $0x37, DI + SHLQ $0x20, BX + IMULQ R8, BX + SHRQ $0x37, BX + LEAL -2(CX), R8 + LEAQ 24(SP)(BX*4), R9 + MOVL (R9), BX + MOVL R8, 24(SP)(DI*4) + MOVL CX, (R9) + CMPL (DX)(BX*1), SI + JEQ match_nolit_loop_calcBlockSizeSmall + INCL CX + JMP search_loop_calcBlockSizeSmall + +emit_remainder_calcBlockSizeSmall: + MOVQ src_len+8(FP), CX + SUBL 12(SP), CX + LEAQ 3(AX)(CX*1), CX + CMPQ CX, (SP) + JB emit_remainder_ok_calcBlockSizeSmall + MOVQ $0x00000000, ret+24(FP) + RET + +emit_remainder_ok_calcBlockSizeSmall: + MOVQ src_len+8(FP), CX + MOVL 12(SP), BX + CMPL BX, CX + JEQ emit_literal_done_emit_remainder_calcBlockSizeSmall + MOVL CX, SI + MOVL CX, 12(SP) + LEAQ (DX)(BX*1), CX + SUBL BX, SI + LEAL -1(SI), CX + CMPL CX, $0x3c + JB one_byte_emit_remainder_calcBlockSizeSmall + CMPL CX, $0x00000100 + JB two_bytes_emit_remainder_calcBlockSizeSmall + JB three_bytes_emit_remainder_calcBlockSizeSmall + +three_bytes_emit_remainder_calcBlockSizeSmall: + ADDQ $0x03, AX + JMP memmove_long_emit_remainder_calcBlockSizeSmall + +two_bytes_emit_remainder_calcBlockSizeSmall: + ADDQ $0x02, AX + CMPL CX, $0x40 + JB memmove_emit_remainder_calcBlockSizeSmall + JMP memmove_long_emit_remainder_calcBlockSizeSmall + +one_byte_emit_remainder_calcBlockSizeSmall: + ADDQ $0x01, AX + +memmove_emit_remainder_calcBlockSizeSmall: + LEAQ (AX)(SI*1), AX + JMP emit_literal_done_emit_remainder_calcBlockSizeSmall + +memmove_long_emit_remainder_calcBlockSizeSmall: + LEAQ (AX)(SI*1), AX + +emit_literal_done_emit_remainder_calcBlockSizeSmall: + MOVQ AX, ret+24(FP) + RET + +// func emitLiteral(dst []byte, lit []byte) int +// Requires: SSE2 +TEXT ·emitLiteral(SB), NOSPLIT, $0-56 + MOVQ lit_len+32(FP), DX + MOVQ dst_base+0(FP), AX + MOVQ lit_base+24(FP), CX + TESTQ DX, DX + JZ emit_literal_end_standalone_skip + MOVL DX, BX + LEAL -1(DX), SI + CMPL SI, $0x3c + JB one_byte_standalone + CMPL SI, $0x00000100 + JB two_bytes_standalone + CMPL SI, $0x00010000 + JB three_bytes_standalone + CMPL SI, $0x01000000 + JB four_bytes_standalone + MOVB $0xfc, (AX) + MOVL SI, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP memmove_long_standalone + +four_bytes_standalone: + MOVL SI, DI + SHRL $0x10, DI + MOVB $0xf8, (AX) + MOVW SI, 1(AX) + MOVB DI, 3(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP memmove_long_standalone + +three_bytes_standalone: + MOVB $0xf4, (AX) + MOVW SI, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP memmove_long_standalone + +two_bytes_standalone: + MOVB $0xf0, (AX) + MOVB SI, 1(AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + CMPL SI, $0x40 + JB memmove_standalone + JMP memmove_long_standalone + +one_byte_standalone: + SHLB $0x02, SI + MOVB SI, (AX) + ADDQ $0x01, BX + ADDQ $0x01, AX + +memmove_standalone: + // genMemMoveShort + CMPQ DX, $0x03 + JB emit_lit_memmove_standalone_memmove_move_1or2 + JE emit_lit_memmove_standalone_memmove_move_3 + CMPQ DX, $0x08 + JB emit_lit_memmove_standalone_memmove_move_4through7 + CMPQ DX, $0x10 + JBE emit_lit_memmove_standalone_memmove_move_8through16 + CMPQ DX, $0x20 + 
JBE emit_lit_memmove_standalone_memmove_move_17through32 + JMP emit_lit_memmove_standalone_memmove_move_33through64 + +emit_lit_memmove_standalone_memmove_move_1or2: + MOVB (CX), SI + MOVB -1(CX)(DX*1), CL + MOVB SI, (AX) + MOVB CL, -1(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_3: + MOVW (CX), SI + MOVB 2(CX), CL + MOVW SI, (AX) + MOVB CL, 2(AX) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_4through7: + MOVL (CX), SI + MOVL -4(CX)(DX*1), CX + MOVL SI, (AX) + MOVL CX, -4(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_8through16: + MOVQ (CX), SI + MOVQ -8(CX)(DX*1), CX + MOVQ SI, (AX) + MOVQ CX, -8(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_17through32: + MOVOU (CX), X0 + MOVOU -16(CX)(DX*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(DX*1) + JMP emit_literal_end_standalone + +emit_lit_memmove_standalone_memmove_move_33through64: + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(DX*1), X2 + MOVOU -16(CX)(DX*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DX*1) + MOVOU X3, -16(AX)(DX*1) + JMP emit_literal_end_standalone + JMP emit_literal_end_standalone + +memmove_long_standalone: + // genMemMoveLong + MOVOU (CX), X0 + MOVOU 16(CX), X1 + MOVOU -32(CX)(DX*1), X2 + MOVOU -16(CX)(DX*1), X3 + MOVQ DX, DI + SHRQ $0x05, DI + MOVQ AX, SI + ANDL $0x0000001f, SI + MOVQ $0x00000040, R8 + SUBQ SI, R8 + DECQ DI + JA emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 + LEAQ -32(CX)(R8*1), SI + LEAQ -32(AX)(R8*1), R9 + +emit_lit_memmove_long_standalonelarge_big_loop_back: + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOA X4, (R9) + MOVOA X5, 16(R9) + ADDQ $0x20, R9 + ADDQ $0x20, SI + ADDQ $0x20, R8 + DECQ DI + JNA emit_lit_memmove_long_standalonelarge_big_loop_back + +emit_lit_memmove_long_standalonelarge_forward_sse_loop_32: + MOVOU -32(CX)(R8*1), X4 + MOVOU -16(CX)(R8*1), X5 + MOVOA X4, -32(AX)(R8*1) + MOVOA X5, -16(AX)(R8*1) + ADDQ $0x20, R8 + CMPQ DX, R8 + JAE emit_lit_memmove_long_standalonelarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(DX*1) + MOVOU X3, -16(AX)(DX*1) + JMP emit_literal_end_standalone + JMP emit_literal_end_standalone + +emit_literal_end_standalone_skip: + XORQ BX, BX + +emit_literal_end_standalone: + MOVQ BX, ret+48(FP) + RET + +// func emitRepeat(dst []byte, offset int, length int) int +TEXT ·emitRepeat(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitRepeat +emit_repeat_again_standalone: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JBE repeat_two_standalone + CMPL SI, $0x0c + JAE cant_repeat_two_offset_standalone + CMPL CX, $0x00000800 + JB repeat_two_offset_standalone + +cant_repeat_two_offset_standalone: + CMPL DX, $0x00000104 + JB repeat_three_standalone + CMPL DX, $0x00010100 + JB repeat_four_standalone + CMPL DX, $0x0100ffff + JB repeat_five_standalone + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone + +repeat_five_standalone: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_repeat_end + +repeat_four_standalone: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_repeat_end + +repeat_three_standalone: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + 
MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_repeat_end + +repeat_two_standalone: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_repeat_end + +repeat_two_offset_standalone: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + +gen_emit_repeat_end: + MOVQ BX, ret+40(FP) + RET + +// func emitCopy(dst []byte, offset int, length int) int +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + MOVQ length+32(FP), DX + + // emitCopy + CMPL CX, $0x00010000 + JB two_byte_offset_standalone + CMPL DX, $0x40 + JBE four_bytes_remain_standalone + MOVB $0xff, (AX) + MOVL CX, 1(AX) + LEAL -64(DX), DX + ADDQ $0x05, BX + ADDQ $0x05, AX + CMPL DX, $0x04 + JB four_bytes_remain_standalone + + // emitRepeat +emit_repeat_again_standalone_emit_copy: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JBE repeat_two_standalone_emit_copy + CMPL SI, $0x0c + JAE cant_repeat_two_offset_standalone_emit_copy + CMPL CX, $0x00000800 + JB repeat_two_offset_standalone_emit_copy + +cant_repeat_two_offset_standalone_emit_copy: + CMPL DX, $0x00000104 + JB repeat_three_standalone_emit_copy + CMPL DX, $0x00010100 + JB repeat_four_standalone_emit_copy + CMPL DX, $0x0100ffff + JB repeat_five_standalone_emit_copy + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy + +repeat_five_standalone_emit_copy: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +four_bytes_remain_standalone: + TESTL DX, DX + JZ gen_emit_copy_end + XORL SI, SI + LEAL -1(SI)(DX*4), DX + MOVB DL, (AX) + MOVL CX, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +two_byte_offset_standalone: + CMPL DX, $0x40 + JBE two_byte_offset_short_standalone + CMPL CX, $0x00000800 + JAE long_offset_short_standalone + MOVL $0x00000001, SI + LEAL 16(SI), SI + MOVB CL, 1(AX) + MOVL CX, DI + SHRL $0x08, DI + SHLL $0x05, DI + ORL DI, SI + MOVB SI, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + SUBL $0x08, DX + + // emitRepeat + LEAL -4(DX), DX + JMP cant_repeat_two_offset_standalone_emit_copy_short_2b + +emit_repeat_again_standalone_emit_copy_short_2b: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JBE repeat_two_standalone_emit_copy_short_2b + CMPL SI, $0x0c + JAE cant_repeat_two_offset_standalone_emit_copy_short_2b + CMPL CX, $0x00000800 + JB repeat_two_offset_standalone_emit_copy_short_2b + +cant_repeat_two_offset_standalone_emit_copy_short_2b: + CMPL DX, $0x00000104 + JB repeat_three_standalone_emit_copy_short_2b + CMPL DX, $0x00010100 + JB repeat_four_standalone_emit_copy_short_2b + 
CMPL DX, $0x0100ffff + JB repeat_five_standalone_emit_copy_short_2b + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy_short_2b + +repeat_five_standalone_emit_copy_short_2b: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy_short_2b: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy_short_2b: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy_short_2b: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy_short_2b: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +long_offset_short_standalone: + MOVB $0xee, (AX) + MOVW CX, 1(AX) + LEAL -60(DX), DX + ADDQ $0x03, AX + ADDQ $0x03, BX + + // emitRepeat +emit_repeat_again_standalone_emit_copy_short: + MOVL DX, SI + LEAL -4(DX), DX + CMPL SI, $0x08 + JBE repeat_two_standalone_emit_copy_short + CMPL SI, $0x0c + JAE cant_repeat_two_offset_standalone_emit_copy_short + CMPL CX, $0x00000800 + JB repeat_two_offset_standalone_emit_copy_short + +cant_repeat_two_offset_standalone_emit_copy_short: + CMPL DX, $0x00000104 + JB repeat_three_standalone_emit_copy_short + CMPL DX, $0x00010100 + JB repeat_four_standalone_emit_copy_short + CMPL DX, $0x0100ffff + JB repeat_five_standalone_emit_copy_short + LEAL -16842747(DX), DX + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + ADDQ $0x05, BX + JMP emit_repeat_again_standalone_emit_copy_short + +repeat_five_standalone_emit_copy_short: + LEAL -65536(DX), DX + MOVL DX, CX + MOVW $0x001d, (AX) + MOVW DX, 2(AX) + SARL $0x10, CX + MOVB CL, 4(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end + +repeat_four_standalone_emit_copy_short: + LEAL -256(DX), DX + MOVW $0x0019, (AX) + MOVW DX, 2(AX) + ADDQ $0x04, BX + ADDQ $0x04, AX + JMP gen_emit_copy_end + +repeat_three_standalone_emit_copy_short: + LEAL -4(DX), DX + MOVW $0x0015, (AX) + MOVB DL, 2(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + JMP gen_emit_copy_end + +repeat_two_standalone_emit_copy_short: + SHLL $0x02, DX + ORL $0x01, DX + MOVW DX, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +repeat_two_offset_standalone_emit_copy_short: + XORQ SI, SI + LEAL 1(SI)(DX*4), DX + MOVB CL, 1(AX) + SARL $0x08, CX + SHLL $0x05, CX + ORL CX, DX + MOVB DL, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +two_byte_offset_short_standalone: + MOVL DX, SI + SHLL $0x02, SI + CMPL DX, $0x0c + JAE emit_copy_three_standalone + CMPL CX, $0x00000800 + JAE emit_copy_three_standalone + LEAL -15(SI), SI + MOVB CL, 1(AX) + SHRL $0x08, CX + SHLL $0x05, CX + ORL CX, SI + MOVB SI, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end + +emit_copy_three_standalone: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW CX, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + +gen_emit_copy_end: + MOVQ BX, ret+40(FP) + RET + +// func emitCopyNoRepeat(dst []byte, offset int, length int) int +TEXT ·emitCopyNoRepeat(SB), NOSPLIT, $0-48 + XORQ BX, BX + MOVQ dst_base+0(FP), AX + MOVQ offset+24(FP), CX + 
MOVQ length+32(FP), DX + + // emitCopy + CMPL CX, $0x00010000 + JB two_byte_offset_standalone_snappy + +four_bytes_loop_back_standalone_snappy: + CMPL DX, $0x40 + JBE four_bytes_remain_standalone_snappy + MOVB $0xff, (AX) + MOVL CX, 1(AX) + LEAL -64(DX), DX + ADDQ $0x05, BX + ADDQ $0x05, AX + CMPL DX, $0x04 + JB four_bytes_remain_standalone_snappy + JMP four_bytes_loop_back_standalone_snappy + +four_bytes_remain_standalone_snappy: + TESTL DX, DX + JZ gen_emit_copy_end_snappy + XORL SI, SI + LEAL -1(SI)(DX*4), DX + MOVB DL, (AX) + MOVL CX, 1(AX) + ADDQ $0x05, BX + ADDQ $0x05, AX + JMP gen_emit_copy_end_snappy + +two_byte_offset_standalone_snappy: + CMPL DX, $0x40 + JBE two_byte_offset_short_standalone_snappy + MOVB $0xee, (AX) + MOVW CX, 1(AX) + LEAL -60(DX), DX + ADDQ $0x03, AX + ADDQ $0x03, BX + JMP two_byte_offset_standalone_snappy + +two_byte_offset_short_standalone_snappy: + MOVL DX, SI + SHLL $0x02, SI + CMPL DX, $0x0c + JAE emit_copy_three_standalone_snappy + CMPL CX, $0x00000800 + JAE emit_copy_three_standalone_snappy + LEAL -15(SI), SI + MOVB CL, 1(AX) + SHRL $0x08, CX + SHLL $0x05, CX + ORL CX, SI + MOVB SI, (AX) + ADDQ $0x02, BX + ADDQ $0x02, AX + JMP gen_emit_copy_end_snappy + +emit_copy_three_standalone_snappy: + LEAL -2(SI), SI + MOVB SI, (AX) + MOVW CX, 1(AX) + ADDQ $0x03, BX + ADDQ $0x03, AX + +gen_emit_copy_end_snappy: + MOVQ BX, ret+40(FP) + RET + +// func matchLen(a []byte, b []byte) int +// Requires: BMI +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + TESTQ BX, BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX + +#else + BSFQ BX, BX + +#endif + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x01 + JE matchlen_match1_standalone + JB gen_match_len_end + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL 2(SI), SI + SUBL $0x02, DX + JZ gen_match_len_end + +matchlen_match1_standalone: + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + LEAL 1(SI), SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET + +// func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4BlockAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -10(AX)(CX*1), CX + XORQ DI, DI + +lz4_s2_loop: + CMPQ DX, BX + JAE lz4_s2_corrupt + CMPQ AX, CX + JAE lz4_s2_dstfull + MOVBQZX (DX), R8 + MOVQ R8, R9 + MOVQ R8, R10 + SHRQ $0x04, R9 + ANDQ $0x0f, R10 + CMPQ R8, $0xf0 + JB lz4_s2_ll_end + +lz4_s2_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4_s2_corrupt + MOVBQZX (DX), R8 + ADDQ R8, R9 + CMPQ R8, $0xff + JEQ lz4_s2_ll_loop + +lz4_s2_ll_end: + LEAQ (DX)(R9*1), R8 + ADDQ $0x04, R10 + CMPQ R8, BX + JAE lz4_s2_corrupt + INCQ DX + INCQ R8 + TESTQ R9, R9 + JZ lz4_s2_lits_done + LEAQ (AX)(R9*1), R11 + CMPQ R11, CX + JAE lz4_s2_dstfull + ADDQ R9, SI + LEAL -1(R9), R11 + CMPL R11, $0x3c + JB one_byte_lz4_s2 + CMPL R11, 
$0x00000100 + JB two_bytes_lz4_s2 + CMPL R11, $0x00010000 + JB three_bytes_lz4_s2 + CMPL R11, $0x01000000 + JB four_bytes_lz4_s2 + MOVB $0xfc, (AX) + MOVL R11, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4_s2 + +four_bytes_lz4_s2: + MOVL R11, R12 + SHRL $0x10, R12 + MOVB $0xf8, (AX) + MOVW R11, 1(AX) + MOVB R12, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4_s2 + +three_bytes_lz4_s2: + MOVB $0xf4, (AX) + MOVW R11, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4_s2 + +two_bytes_lz4_s2: + MOVB $0xf0, (AX) + MOVB R11, 1(AX) + ADDQ $0x02, AX + CMPL R11, $0x40 + JB memmove_lz4_s2 + JMP memmove_long_lz4_s2 + +one_byte_lz4_s2: + SHLB $0x02, R11 + MOVB R11, (AX) + ADDQ $0x01, AX + +memmove_lz4_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_lz4_s2_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_lz4_s2_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_lz4_s2_memmove_move_17through32 + JMP emit_lit_memmove_lz4_s2_memmove_move_33through64 + +emit_lit_memmove_lz4_s2_memmove_move_8: + MOVQ (DX), R12 + MOVQ R12, (AX) + JMP memmove_end_copy_lz4_s2 + +emit_lit_memmove_lz4_s2_memmove_move_8through16: + MOVQ (DX), R12 + MOVQ -8(DX)(R9*1), DX + MOVQ R12, (AX) + MOVQ DX, -8(AX)(R9*1) + JMP memmove_end_copy_lz4_s2 + +emit_lit_memmove_lz4_s2_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_lz4_s2 + +emit_lit_memmove_lz4_s2_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_lz4_s2: + MOVQ R11, AX + JMP lz4_s2_lits_emit_done + +memmove_long_lz4_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R12 + ANDL $0x0000001f, R12 + MOVQ $0x00000040, R14 + SUBQ R12, R14 + DECQ R13 + JA emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32 + LEAQ -32(DX)(R14*1), R12 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_lz4_s2large_big_loop_back: + MOVOU (R12), X4 + MOVOU 16(R12), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R12 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_lz4_s2large_big_loop_back + +emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32: + MOVOU -32(DX)(R14*1), X4 + MOVOU -16(DX)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_lz4_s2large_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R11, AX + +lz4_s2_lits_emit_done: + MOVQ R8, DX + +lz4_s2_lits_done: + CMPQ DX, BX + JNE lz4_s2_match + CMPQ R10, $0x04 + JEQ lz4_s2_done + JMP lz4_s2_corrupt + +lz4_s2_match: + LEAQ 2(DX), R8 + CMPQ R8, BX + JAE lz4_s2_corrupt + MOVWQZX (DX), R9 + MOVQ R8, DX + TESTQ R9, R9 + JZ lz4_s2_corrupt + CMPQ R9, SI + JA lz4_s2_corrupt + CMPQ R10, $0x13 + JNE lz4_s2_ml_done + +lz4_s2_ml_loop: + MOVBQZX (DX), R8 + INCQ DX + ADDQ R8, R10 + CMPQ DX, BX + JAE lz4_s2_corrupt + CMPQ R8, $0xff + JEQ lz4_s2_ml_loop + +lz4_s2_ml_done: + ADDQ R10, SI + CMPQ R9, DI + JNE lz4_s2_docopy + + // emitRepeat +emit_repeat_again_lz4_s2: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2 + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2 + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2 + 
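+	// Pick a repeat encoding by remaining length: tag bytes 0x15, 0x19 and
+	// 0x1d take 1, 2 and 3 extra length bytes; the zero offset byte after
+	// the tag marks the code as a repeat (mirrors emitRepeat16 in
+	// lz4convert.go).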
+cant_repeat_two_offset_lz4_s2: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2 + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2 + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2 + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2 + +repeat_five_lz4_s2: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4_s2_loop + +repeat_four_lz4_s2: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4_s2_loop + +repeat_three_lz4_s2: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +repeat_two_lz4_s2: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +repeat_two_offset_lz4_s2: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +lz4_s2_docopy: + MOVQ R9, DI + + // emitCopy + CMPL R10, $0x40 + JBE two_byte_offset_short_lz4_s2 + CMPL R9, $0x00000800 + JAE long_offset_short_lz4_s2 + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB R9, 1(AX) + MOVL R9, R11 + SHRL $0x08, R11 + SHLL $0x05, R11 + ORL R11, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + +emit_repeat_again_lz4_s2_emit_copy_short_2b: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2_emit_copy_short_2b + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2_emit_copy_short_2b + +cant_repeat_two_offset_lz4_s2_emit_copy_short_2b: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2_emit_copy_short_2b + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2_emit_copy_short_2b + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2_emit_copy_short_2b + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short_2b + +repeat_five_lz4_s2_emit_copy_short_2b: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4_s2_loop + +repeat_four_lz4_s2_emit_copy_short_2b: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4_s2_loop + +repeat_three_lz4_s2_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +repeat_two_lz4_s2_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +long_offset_short_lz4_s2: + MOVB $0xee, (AX) + MOVW R9, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_lz4_s2_emit_copy_short: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2_emit_copy_short + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2_emit_copy_short + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2_emit_copy_short + +cant_repeat_two_offset_lz4_s2_emit_copy_short: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2_emit_copy_short + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2_emit_copy_short + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2_emit_copy_short + LEAL 
-16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short + +repeat_five_lz4_s2_emit_copy_short: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4_s2_loop + +repeat_four_lz4_s2_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4_s2_loop + +repeat_three_lz4_s2_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +repeat_two_lz4_s2_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +two_byte_offset_short_lz4_s2: + MOVL R10, R8 + SHLL $0x02, R8 + CMPL R10, $0x0c + JAE emit_copy_three_lz4_s2 + CMPL R9, $0x00000800 + JAE emit_copy_three_lz4_s2 + LEAL -15(R8), R8 + MOVB R9, 1(AX) + SHRL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + JMP lz4_s2_loop + +emit_copy_three_lz4_s2: + LEAL -2(R8), R8 + MOVB R8, (AX) + MOVW R9, 1(AX) + ADDQ $0x03, AX + JMP lz4_s2_loop + +lz4_s2_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4_s2_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4_s2_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +// func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4sBlockAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -10(AX)(CX*1), CX + XORQ DI, DI + +lz4s_s2_loop: + CMPQ DX, BX + JAE lz4s_s2_corrupt + CMPQ AX, CX + JAE lz4s_s2_dstfull + MOVBQZX (DX), R8 + MOVQ R8, R9 + MOVQ R8, R10 + SHRQ $0x04, R9 + ANDQ $0x0f, R10 + CMPQ R8, $0xf0 + JB lz4s_s2_ll_end + +lz4s_s2_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4s_s2_corrupt + MOVBQZX (DX), R8 + ADDQ R8, R9 + CMPQ R8, $0xff + JEQ lz4s_s2_ll_loop + +lz4s_s2_ll_end: + LEAQ (DX)(R9*1), R8 + ADDQ $0x03, R10 + CMPQ R8, BX + JAE lz4s_s2_corrupt + INCQ DX + INCQ R8 + TESTQ R9, R9 + JZ lz4s_s2_lits_done + LEAQ (AX)(R9*1), R11 + CMPQ R11, CX + JAE lz4s_s2_dstfull + ADDQ R9, SI + LEAL -1(R9), R11 + CMPL R11, $0x3c + JB one_byte_lz4s_s2 + CMPL R11, $0x00000100 + JB two_bytes_lz4s_s2 + CMPL R11, $0x00010000 + JB three_bytes_lz4s_s2 + CMPL R11, $0x01000000 + JB four_bytes_lz4s_s2 + MOVB $0xfc, (AX) + MOVL R11, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4s_s2 + +four_bytes_lz4s_s2: + MOVL R11, R12 + SHRL $0x10, R12 + MOVB $0xf8, (AX) + MOVW R11, 1(AX) + MOVB R12, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4s_s2 + +three_bytes_lz4s_s2: + MOVB $0xf4, (AX) + MOVW R11, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4s_s2 + +two_bytes_lz4s_s2: + MOVB $0xf0, (AX) + MOVB R11, 1(AX) + ADDQ $0x02, AX + CMPL R11, $0x40 + JB memmove_lz4s_s2 + JMP memmove_long_lz4s_s2 + +one_byte_lz4s_s2: + SHLB $0x02, R11 + MOVB R11, (AX) + ADDQ $0x01, AX + +memmove_lz4s_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveShort + CMPQ R9, $0x08 + JBE emit_lit_memmove_lz4s_s2_memmove_move_8 + CMPQ R9, $0x10 + JBE emit_lit_memmove_lz4s_s2_memmove_move_8through16 + CMPQ R9, $0x20 + JBE emit_lit_memmove_lz4s_s2_memmove_move_17through32 + JMP 
emit_lit_memmove_lz4s_s2_memmove_move_33through64 + +emit_lit_memmove_lz4s_s2_memmove_move_8: + MOVQ (DX), R12 + MOVQ R12, (AX) + JMP memmove_end_copy_lz4s_s2 + +emit_lit_memmove_lz4s_s2_memmove_move_8through16: + MOVQ (DX), R12 + MOVQ -8(DX)(R9*1), DX + MOVQ R12, (AX) + MOVQ DX, -8(AX)(R9*1) + JMP memmove_end_copy_lz4s_s2 + +emit_lit_memmove_lz4s_s2_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R9*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R9*1) + JMP memmove_end_copy_lz4s_s2 + +emit_lit_memmove_lz4s_s2_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + +memmove_end_copy_lz4s_s2: + MOVQ R11, AX + JMP lz4s_s2_lits_emit_done + +memmove_long_lz4s_s2: + LEAQ (AX)(R9*1), R11 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R9*1), X2 + MOVOU -16(DX)(R9*1), X3 + MOVQ R9, R13 + SHRQ $0x05, R13 + MOVQ AX, R12 + ANDL $0x0000001f, R12 + MOVQ $0x00000040, R14 + SUBQ R12, R14 + DECQ R13 + JA emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32 + LEAQ -32(DX)(R14*1), R12 + LEAQ -32(AX)(R14*1), R15 + +emit_lit_memmove_long_lz4s_s2large_big_loop_back: + MOVOU (R12), X4 + MOVOU 16(R12), X5 + MOVOA X4, (R15) + MOVOA X5, 16(R15) + ADDQ $0x20, R15 + ADDQ $0x20, R12 + ADDQ $0x20, R14 + DECQ R13 + JNA emit_lit_memmove_long_lz4s_s2large_big_loop_back + +emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32: + MOVOU -32(DX)(R14*1), X4 + MOVOU -16(DX)(R14*1), X5 + MOVOA X4, -32(AX)(R14*1) + MOVOA X5, -16(AX)(R14*1) + ADDQ $0x20, R14 + CMPQ R9, R14 + JAE emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R9*1) + MOVOU X3, -16(AX)(R9*1) + MOVQ R11, AX + +lz4s_s2_lits_emit_done: + MOVQ R8, DX + +lz4s_s2_lits_done: + CMPQ DX, BX + JNE lz4s_s2_match + CMPQ R10, $0x03 + JEQ lz4s_s2_done + JMP lz4s_s2_corrupt + +lz4s_s2_match: + CMPQ R10, $0x03 + JEQ lz4s_s2_loop + LEAQ 2(DX), R8 + CMPQ R8, BX + JAE lz4s_s2_corrupt + MOVWQZX (DX), R9 + MOVQ R8, DX + TESTQ R9, R9 + JZ lz4s_s2_corrupt + CMPQ R9, SI + JA lz4s_s2_corrupt + CMPQ R10, $0x12 + JNE lz4s_s2_ml_done + +lz4s_s2_ml_loop: + MOVBQZX (DX), R8 + INCQ DX + ADDQ R8, R10 + CMPQ DX, BX + JAE lz4s_s2_corrupt + CMPQ R8, $0xff + JEQ lz4s_s2_ml_loop + +lz4s_s2_ml_done: + ADDQ R10, SI + CMPQ R9, DI + JNE lz4s_s2_docopy + + // emitRepeat +emit_repeat_again_lz4_s2: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2 + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2 + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2 + +cant_repeat_two_offset_lz4_s2: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2 + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2 + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2 + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2 + +repeat_five_lz4_s2: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4s_s2_loop + +repeat_four_lz4_s2: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4s_s2_loop + +repeat_three_lz4_s2: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +repeat_two_lz4_s2: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +repeat_two_offset_lz4_s2: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) 
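+	// Fold the high offset bits into the tag byte: bits 5-7 hold offset>>8,
+	// bits 2-4 the length and bits 0-1 tagCopy1.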
+ SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +lz4s_s2_docopy: + MOVQ R9, DI + + // emitCopy + CMPL R10, $0x40 + JBE two_byte_offset_short_lz4_s2 + CMPL R9, $0x00000800 + JAE long_offset_short_lz4_s2 + MOVL $0x00000001, R8 + LEAL 16(R8), R8 + MOVB R9, 1(AX) + MOVL R9, R11 + SHRL $0x08, R11 + SHLL $0x05, R11 + ORL R11, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + SUBL $0x08, R10 + + // emitRepeat + LEAL -4(R10), R10 + JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + +emit_repeat_again_lz4_s2_emit_copy_short_2b: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2_emit_copy_short_2b + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2_emit_copy_short_2b + +cant_repeat_two_offset_lz4_s2_emit_copy_short_2b: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2_emit_copy_short_2b + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2_emit_copy_short_2b + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2_emit_copy_short_2b + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short_2b + +repeat_five_lz4_s2_emit_copy_short_2b: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4s_s2_loop + +repeat_four_lz4_s2_emit_copy_short_2b: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4s_s2_loop + +repeat_three_lz4_s2_emit_copy_short_2b: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +repeat_two_lz4_s2_emit_copy_short_2b: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short_2b: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +long_offset_short_lz4_s2: + MOVB $0xee, (AX) + MOVW R9, 1(AX) + LEAL -60(R10), R10 + ADDQ $0x03, AX + + // emitRepeat +emit_repeat_again_lz4_s2_emit_copy_short: + MOVL R10, R8 + LEAL -4(R10), R10 + CMPL R8, $0x08 + JBE repeat_two_lz4_s2_emit_copy_short + CMPL R8, $0x0c + JAE cant_repeat_two_offset_lz4_s2_emit_copy_short + CMPL R9, $0x00000800 + JB repeat_two_offset_lz4_s2_emit_copy_short + +cant_repeat_two_offset_lz4_s2_emit_copy_short: + CMPL R10, $0x00000104 + JB repeat_three_lz4_s2_emit_copy_short + CMPL R10, $0x00010100 + JB repeat_four_lz4_s2_emit_copy_short + CMPL R10, $0x0100ffff + JB repeat_five_lz4_s2_emit_copy_short + LEAL -16842747(R10), R10 + MOVL $0xfffb001d, (AX) + MOVB $0xff, 4(AX) + ADDQ $0x05, AX + JMP emit_repeat_again_lz4_s2_emit_copy_short + +repeat_five_lz4_s2_emit_copy_short: + LEAL -65536(R10), R10 + MOVL R10, R9 + MOVW $0x001d, (AX) + MOVW R10, 2(AX) + SARL $0x10, R9 + MOVB R9, 4(AX) + ADDQ $0x05, AX + JMP lz4s_s2_loop + +repeat_four_lz4_s2_emit_copy_short: + LEAL -256(R10), R10 + MOVW $0x0019, (AX) + MOVW R10, 2(AX) + ADDQ $0x04, AX + JMP lz4s_s2_loop + +repeat_three_lz4_s2_emit_copy_short: + LEAL -4(R10), R10 + MOVW $0x0015, (AX) + MOVB R10, 2(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +repeat_two_lz4_s2_emit_copy_short: + SHLL $0x02, R10 + ORL $0x01, R10 + MOVW R10, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +repeat_two_offset_lz4_s2_emit_copy_short: + XORQ R8, R8 + LEAL 1(R8)(R10*4), R10 + MOVB R9, 1(AX) + SARL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R10 + MOVB R10, (AX) + ADDQ $0x02, AX + JMP 
lz4s_s2_loop + +two_byte_offset_short_lz4_s2: + MOVL R10, R8 + SHLL $0x02, R8 + CMPL R10, $0x0c + JAE emit_copy_three_lz4_s2 + CMPL R9, $0x00000800 + JAE emit_copy_three_lz4_s2 + LEAL -15(R8), R8 + MOVB R9, 1(AX) + SHRL $0x08, R9 + SHLL $0x05, R9 + ORL R9, R8 + MOVB R8, (AX) + ADDQ $0x02, AX + JMP lz4s_s2_loop + +emit_copy_three_lz4_s2: + LEAL -2(R8), R8 + MOVB R8, (AX) + MOVW R9, 1(AX) + ADDQ $0x03, AX + JMP lz4s_s2_loop + +lz4s_s2_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4s_s2_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4s_s2_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +// func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4BlockSnappyAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -10(AX)(CX*1), CX + +lz4_snappy_loop: + CMPQ DX, BX + JAE lz4_snappy_corrupt + CMPQ AX, CX + JAE lz4_snappy_dstfull + MOVBQZX (DX), DI + MOVQ DI, R8 + MOVQ DI, R9 + SHRQ $0x04, R8 + ANDQ $0x0f, R9 + CMPQ DI, $0xf0 + JB lz4_snappy_ll_end + +lz4_snappy_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4_snappy_corrupt + MOVBQZX (DX), DI + ADDQ DI, R8 + CMPQ DI, $0xff + JEQ lz4_snappy_ll_loop + +lz4_snappy_ll_end: + LEAQ (DX)(R8*1), DI + ADDQ $0x04, R9 + CMPQ DI, BX + JAE lz4_snappy_corrupt + INCQ DX + INCQ DI + TESTQ R8, R8 + JZ lz4_snappy_lits_done + LEAQ (AX)(R8*1), R10 + CMPQ R10, CX + JAE lz4_snappy_dstfull + ADDQ R8, SI + LEAL -1(R8), R10 + CMPL R10, $0x3c + JB one_byte_lz4_snappy + CMPL R10, $0x00000100 + JB two_bytes_lz4_snappy + CMPL R10, $0x00010000 + JB three_bytes_lz4_snappy + CMPL R10, $0x01000000 + JB four_bytes_lz4_snappy + MOVB $0xfc, (AX) + MOVL R10, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4_snappy + +four_bytes_lz4_snappy: + MOVL R10, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW R10, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4_snappy + +three_bytes_lz4_snappy: + MOVB $0xf4, (AX) + MOVW R10, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4_snappy + +two_bytes_lz4_snappy: + MOVB $0xf0, (AX) + MOVB R10, 1(AX) + ADDQ $0x02, AX + CMPL R10, $0x40 + JB memmove_lz4_snappy + JMP memmove_long_lz4_snappy + +one_byte_lz4_snappy: + SHLB $0x02, R10 + MOVB R10, (AX) + ADDQ $0x01, AX + +memmove_lz4_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_lz4_snappy_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_lz4_snappy_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_lz4_snappy_memmove_move_17through32 + JMP emit_lit_memmove_lz4_snappy_memmove_move_33through64 + +emit_lit_memmove_lz4_snappy_memmove_move_8: + MOVQ (DX), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_lz4_snappy + +emit_lit_memmove_lz4_snappy_memmove_move_8through16: + MOVQ (DX), R11 + MOVQ -8(DX)(R8*1), DX + MOVQ R11, (AX) + MOVQ DX, -8(AX)(R8*1) + JMP memmove_end_copy_lz4_snappy + +emit_lit_memmove_lz4_snappy_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_lz4_snappy + +emit_lit_memmove_lz4_snappy_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_lz4_snappy: + MOVQ R10, AX + JMP 
lz4_snappy_lits_emit_done + +memmove_long_lz4_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32 + LEAQ -32(DX)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_lz4_snappylarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_lz4_snappylarge_big_loop_back + +emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32: + MOVOU -32(DX)(R13*1), X4 + MOVOU -16(DX)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_lz4_snappylarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ R10, AX + +lz4_snappy_lits_emit_done: + MOVQ DI, DX + +lz4_snappy_lits_done: + CMPQ DX, BX + JNE lz4_snappy_match + CMPQ R9, $0x04 + JEQ lz4_snappy_done + JMP lz4_snappy_corrupt + +lz4_snappy_match: + LEAQ 2(DX), DI + CMPQ DI, BX + JAE lz4_snappy_corrupt + MOVWQZX (DX), R8 + MOVQ DI, DX + TESTQ R8, R8 + JZ lz4_snappy_corrupt + CMPQ R8, SI + JA lz4_snappy_corrupt + CMPQ R9, $0x13 + JNE lz4_snappy_ml_done + +lz4_snappy_ml_loop: + MOVBQZX (DX), DI + INCQ DX + ADDQ DI, R9 + CMPQ DX, BX + JAE lz4_snappy_corrupt + CMPQ DI, $0xff + JEQ lz4_snappy_ml_loop + +lz4_snappy_ml_done: + ADDQ R9, SI + + // emitCopy +two_byte_offset_lz4_s2: + CMPL R9, $0x40 + JBE two_byte_offset_short_lz4_s2 + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + CMPQ AX, CX + JAE lz4_snappy_loop + JMP two_byte_offset_lz4_s2 + +two_byte_offset_short_lz4_s2: + MOVL R9, DI + SHLL $0x02, DI + CMPL R9, $0x0c + JAE emit_copy_three_lz4_s2 + CMPL R8, $0x00000800 + JAE emit_copy_three_lz4_s2 + LEAL -15(DI), DI + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP lz4_snappy_loop + +emit_copy_three_lz4_s2: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP lz4_snappy_loop + +lz4_snappy_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4_snappy_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4_snappy_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +// func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) +// Requires: SSE2 +TEXT ·cvtLZ4sBlockSnappyAsm(SB), NOSPLIT, $0-64 + XORQ SI, SI + MOVQ dst_base+0(FP), AX + MOVQ dst_len+8(FP), CX + MOVQ src_base+24(FP), DX + MOVQ src_len+32(FP), BX + LEAQ (DX)(BX*1), BX + LEAQ -10(AX)(CX*1), CX + +lz4s_snappy_loop: + CMPQ DX, BX + JAE lz4s_snappy_corrupt + CMPQ AX, CX + JAE lz4s_snappy_dstfull + MOVBQZX (DX), DI + MOVQ DI, R8 + MOVQ DI, R9 + SHRQ $0x04, R8 + ANDQ $0x0f, R9 + CMPQ DI, $0xf0 + JB lz4s_snappy_ll_end + +lz4s_snappy_ll_loop: + INCQ DX + CMPQ DX, BX + JAE lz4s_snappy_corrupt + MOVBQZX (DX), DI + ADDQ DI, R8 + CMPQ DI, $0xff + JEQ lz4s_snappy_ll_loop + +lz4s_snappy_ll_end: + LEAQ (DX)(R8*1), DI + ADDQ $0x03, R9 + CMPQ DI, BX + JAE lz4s_snappy_corrupt + INCQ DX + INCQ DI + TESTQ R8, R8 + JZ lz4s_snappy_lits_done + LEAQ (AX)(R8*1), R10 + CMPQ R10, CX + JAE lz4s_snappy_dstfull + ADDQ R8, SI + LEAL -1(R8), R10 + CMPL 
R10, $0x3c + JB one_byte_lz4s_snappy + CMPL R10, $0x00000100 + JB two_bytes_lz4s_snappy + CMPL R10, $0x00010000 + JB three_bytes_lz4s_snappy + CMPL R10, $0x01000000 + JB four_bytes_lz4s_snappy + MOVB $0xfc, (AX) + MOVL R10, 1(AX) + ADDQ $0x05, AX + JMP memmove_long_lz4s_snappy + +four_bytes_lz4s_snappy: + MOVL R10, R11 + SHRL $0x10, R11 + MOVB $0xf8, (AX) + MOVW R10, 1(AX) + MOVB R11, 3(AX) + ADDQ $0x04, AX + JMP memmove_long_lz4s_snappy + +three_bytes_lz4s_snappy: + MOVB $0xf4, (AX) + MOVW R10, 1(AX) + ADDQ $0x03, AX + JMP memmove_long_lz4s_snappy + +two_bytes_lz4s_snappy: + MOVB $0xf0, (AX) + MOVB R10, 1(AX) + ADDQ $0x02, AX + CMPL R10, $0x40 + JB memmove_lz4s_snappy + JMP memmove_long_lz4s_snappy + +one_byte_lz4s_snappy: + SHLB $0x02, R10 + MOVB R10, (AX) + ADDQ $0x01, AX + +memmove_lz4s_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveShort + CMPQ R8, $0x08 + JBE emit_lit_memmove_lz4s_snappy_memmove_move_8 + CMPQ R8, $0x10 + JBE emit_lit_memmove_lz4s_snappy_memmove_move_8through16 + CMPQ R8, $0x20 + JBE emit_lit_memmove_lz4s_snappy_memmove_move_17through32 + JMP emit_lit_memmove_lz4s_snappy_memmove_move_33through64 + +emit_lit_memmove_lz4s_snappy_memmove_move_8: + MOVQ (DX), R11 + MOVQ R11, (AX) + JMP memmove_end_copy_lz4s_snappy + +emit_lit_memmove_lz4s_snappy_memmove_move_8through16: + MOVQ (DX), R11 + MOVQ -8(DX)(R8*1), DX + MOVQ R11, (AX) + MOVQ DX, -8(AX)(R8*1) + JMP memmove_end_copy_lz4s_snappy + +emit_lit_memmove_lz4s_snappy_memmove_move_17through32: + MOVOU (DX), X0 + MOVOU -16(DX)(R8*1), X1 + MOVOU X0, (AX) + MOVOU X1, -16(AX)(R8*1) + JMP memmove_end_copy_lz4s_snappy + +emit_lit_memmove_lz4s_snappy_memmove_move_33through64: + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + +memmove_end_copy_lz4s_snappy: + MOVQ R10, AX + JMP lz4s_snappy_lits_emit_done + +memmove_long_lz4s_snappy: + LEAQ (AX)(R8*1), R10 + + // genMemMoveLong + MOVOU (DX), X0 + MOVOU 16(DX), X1 + MOVOU -32(DX)(R8*1), X2 + MOVOU -16(DX)(R8*1), X3 + MOVQ R8, R12 + SHRQ $0x05, R12 + MOVQ AX, R11 + ANDL $0x0000001f, R11 + MOVQ $0x00000040, R13 + SUBQ R11, R13 + DECQ R12 + JA emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32 + LEAQ -32(DX)(R13*1), R11 + LEAQ -32(AX)(R13*1), R14 + +emit_lit_memmove_long_lz4s_snappylarge_big_loop_back: + MOVOU (R11), X4 + MOVOU 16(R11), X5 + MOVOA X4, (R14) + MOVOA X5, 16(R14) + ADDQ $0x20, R14 + ADDQ $0x20, R11 + ADDQ $0x20, R13 + DECQ R12 + JNA emit_lit_memmove_long_lz4s_snappylarge_big_loop_back + +emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32: + MOVOU -32(DX)(R13*1), X4 + MOVOU -16(DX)(R13*1), X5 + MOVOA X4, -32(AX)(R13*1) + MOVOA X5, -16(AX)(R13*1) + ADDQ $0x20, R13 + CMPQ R8, R13 + JAE emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, -32(AX)(R8*1) + MOVOU X3, -16(AX)(R8*1) + MOVQ R10, AX + +lz4s_snappy_lits_emit_done: + MOVQ DI, DX + +lz4s_snappy_lits_done: + CMPQ DX, BX + JNE lz4s_snappy_match + CMPQ R9, $0x03 + JEQ lz4s_snappy_done + JMP lz4s_snappy_corrupt + +lz4s_snappy_match: + CMPQ R9, $0x03 + JEQ lz4s_snappy_loop + LEAQ 2(DX), DI + CMPQ DI, BX + JAE lz4s_snappy_corrupt + MOVWQZX (DX), R8 + MOVQ DI, DX + TESTQ R8, R8 + JZ lz4s_snappy_corrupt + CMPQ R8, SI + JA lz4s_snappy_corrupt + CMPQ R9, $0x12 + JNE lz4s_snappy_ml_done + +lz4s_snappy_ml_loop: + MOVBQZX (DX), DI + INCQ DX + ADDQ DI, R9 + CMPQ DX, BX + JAE lz4s_snappy_corrupt + CMPQ DI, $0xff + JEQ lz4s_snappy_ml_loop + 
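+	// Snappy output has no repeat codes, so every match below is emitted as
+	// a plain copy-1/copy-2; long matches are split into 60-byte copy-2
+	// chunks.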
+lz4s_snappy_ml_done: + ADDQ R9, SI + + // emitCopy +two_byte_offset_lz4_s2: + CMPL R9, $0x40 + JBE two_byte_offset_short_lz4_s2 + MOVB $0xee, (AX) + MOVW R8, 1(AX) + LEAL -60(R9), R9 + ADDQ $0x03, AX + CMPQ AX, CX + JAE lz4s_snappy_loop + JMP two_byte_offset_lz4_s2 + +two_byte_offset_short_lz4_s2: + MOVL R9, DI + SHLL $0x02, DI + CMPL R9, $0x0c + JAE emit_copy_three_lz4_s2 + CMPL R8, $0x00000800 + JAE emit_copy_three_lz4_s2 + LEAL -15(DI), DI + MOVB R8, 1(AX) + SHRL $0x08, R8 + SHLL $0x05, R8 + ORL R8, DI + MOVB DI, (AX) + ADDQ $0x02, AX + JMP lz4s_snappy_loop + +emit_copy_three_lz4_s2: + LEAL -2(DI), DI + MOVB DI, (AX) + MOVW R8, 1(AX) + ADDQ $0x03, AX + JMP lz4s_snappy_loop + +lz4s_snappy_done: + MOVQ dst_base+0(FP), CX + SUBQ CX, AX + MOVQ SI, uncompressed+48(FP) + MOVQ AX, dstUsed+56(FP) + RET + +lz4s_snappy_corrupt: + XORQ AX, AX + LEAQ -1(AX), SI + MOVQ SI, uncompressed+48(FP) + RET + +lz4s_snappy_dstfull: + XORQ AX, AX + LEAQ -2(AX), SI + MOVQ SI, uncompressed+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/s2/index.go b/vendor/github.com/klauspost/compress/s2/index.go new file mode 100644 index 00000000000..dd9ecfe7185 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/index.go @@ -0,0 +1,598 @@ +// Copyright (c) 2022+ Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "io" + "sort" +) + +const ( + S2IndexHeader = "s2idx\x00" + S2IndexTrailer = "\x00xdi2s" + maxIndexEntries = 1 << 16 +) + +// Index represents an S2/Snappy index. +type Index struct { + TotalUncompressed int64 // Total Uncompressed size if known. Will be -1 if unknown. + TotalCompressed int64 // Total Compressed size if known. Will be -1 if unknown. + info []struct { + compressedOffset int64 + uncompressedOffset int64 + } + estBlockUncomp int64 +} + +func (i *Index) reset(maxBlock int) { + i.estBlockUncomp = int64(maxBlock) + i.TotalCompressed = -1 + i.TotalUncompressed = -1 + if len(i.info) > 0 { + i.info = i.info[:0] + } +} + +// allocInfos will allocate an empty slice of infos. +func (i *Index) allocInfos(n int) { + if n > maxIndexEntries { + panic("n > maxIndexEntries") + } + i.info = make([]struct { + compressedOffset int64 + uncompressedOffset int64 + }, 0, n) +} + +// add an uncompressed and compressed pair. +// Entries must be sent in order. +func (i *Index) add(compressedOffset, uncompressedOffset int64) error { + if i == nil { + return nil + } + lastIdx := len(i.info) - 1 + if lastIdx >= 0 { + latest := i.info[lastIdx] + if latest.uncompressedOffset == uncompressedOffset { + // Uncompressed didn't change, don't add entry, + // but update start index. + latest.compressedOffset = compressedOffset + i.info[lastIdx] = latest + return nil + } + if latest.uncompressedOffset > uncompressedOffset { + return fmt.Errorf("internal error: Earlier uncompressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset) + } + if latest.compressedOffset > compressedOffset { + return fmt.Errorf("internal error: Earlier compressed received (%d > %d)", latest.uncompressedOffset, uncompressedOffset) + } + } + i.info = append(i.info, struct { + compressedOffset int64 + uncompressedOffset int64 + }{compressedOffset: compressedOffset, uncompressedOffset: uncompressedOffset}) + return nil +} + +// Find the offset at or before the wanted (uncompressed) offset. 
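+// The returned pair is the indexed entry at or before the requested
+// position; the caller seeks to compressedOff and decompresses forward.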
+// If offset is 0 or positive it is the offset from the beginning of the file. +// If the uncompressed size is known, the offset must be within the file. +// If an offset outside the file is requested io.ErrUnexpectedEOF is returned. +// If the offset is negative, it is interpreted as the distance from the end of the file, +// where -1 represents the last byte. +// If offset from the end of the file is requested, but size is unknown, +// ErrUnsupported will be returned. +func (i *Index) Find(offset int64) (compressedOff, uncompressedOff int64, err error) { + if i.TotalUncompressed < 0 { + return 0, 0, ErrCorrupt + } + if offset < 0 { + offset = i.TotalUncompressed + offset + if offset < 0 { + return 0, 0, io.ErrUnexpectedEOF + } + } + if offset > i.TotalUncompressed { + return 0, 0, io.ErrUnexpectedEOF + } + if len(i.info) > 200 { + n := sort.Search(len(i.info), func(n int) bool { + return i.info[n].uncompressedOffset > offset + }) + if n == 0 { + n = 1 + } + return i.info[n-1].compressedOffset, i.info[n-1].uncompressedOffset, nil + } + for _, info := range i.info { + if info.uncompressedOffset > offset { + break + } + compressedOff = info.compressedOffset + uncompressedOff = info.uncompressedOffset + } + return compressedOff, uncompressedOff, nil +} + +// reduce to stay below maxIndexEntries +func (i *Index) reduce() { + if len(i.info) < maxIndexEntries && i.estBlockUncomp >= 1<<20 { + return + } + + // Algorithm, keep 1, remove removeN entries... + removeN := (len(i.info) + 1) / maxIndexEntries + src := i.info + j := 0 + + // Each block should be at least 1MB, but don't reduce below 1000 entries. + for i.estBlockUncomp*(int64(removeN)+1) < 1<<20 && len(i.info)/(removeN+1) > 1000 { + removeN++ + } + for idx := 0; idx < len(src); idx++ { + i.info[j] = src[idx] + j++ + idx += removeN + } + i.info = i.info[:j] + // Update maxblock estimate. + i.estBlockUncomp += i.estBlockUncomp * int64(removeN) +} + +func (i *Index) appendTo(b []byte, uncompTotal, compTotal int64) []byte { + i.reduce() + var tmp [binary.MaxVarintLen64]byte + + initSize := len(b) + // We make the start a skippable header+size. + b = append(b, ChunkTypeIndex, 0, 0, 0) + b = append(b, []byte(S2IndexHeader)...) + // Total Uncompressed size + n := binary.PutVarint(tmp[:], uncompTotal) + b = append(b, tmp[:n]...) + // Total Compressed size + n = binary.PutVarint(tmp[:], compTotal) + b = append(b, tmp[:n]...) + // Put EstBlockUncomp size + n = binary.PutVarint(tmp[:], i.estBlockUncomp) + b = append(b, tmp[:n]...) + // Put length + n = binary.PutVarint(tmp[:], int64(len(i.info))) + b = append(b, tmp[:n]...) + + // Check if we should add uncompressed offsets + var hasUncompressed byte + for idx, info := range i.info { + if idx == 0 { + if info.uncompressedOffset != 0 { + hasUncompressed = 1 + break + } + continue + } + if info.uncompressedOffset != i.info[idx-1].uncompressedOffset+i.estBlockUncomp { + hasUncompressed = 1 + break + } + } + b = append(b, hasUncompressed) + + // Add each entry + if hasUncompressed == 1 { + for idx, info := range i.info { + uOff := info.uncompressedOffset + if idx > 0 { + prev := i.info[idx-1] + uOff -= prev.uncompressedOffset + (i.estBlockUncomp) + } + n = binary.PutVarint(tmp[:], uOff) + b = append(b, tmp[:n]...) + } + } + + // Initial compressed size estimate. 
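+	// Compressed offsets are stored as deltas from this prediction, which
+	// is adjusted by half the error after each entry, keeping the varints
+	// short.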
+ cPredict := i.estBlockUncomp / 2 + + for idx, info := range i.info { + cOff := info.compressedOffset + if idx > 0 { + prev := i.info[idx-1] + cOff -= prev.compressedOffset + cPredict + // Update compressed size prediction, with half the error. + cPredict += cOff / 2 + } + n = binary.PutVarint(tmp[:], cOff) + b = append(b, tmp[:n]...) + } + + // Add Total Size. + // Stored as fixed size for easier reading. + binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)-initSize+4+len(S2IndexTrailer))) + b = append(b, tmp[:4]...) + // Trailer + b = append(b, []byte(S2IndexTrailer)...) + + // Update size + chunkLen := len(b) - initSize - skippableFrameHeader + b[initSize+1] = uint8(chunkLen >> 0) + b[initSize+2] = uint8(chunkLen >> 8) + b[initSize+3] = uint8(chunkLen >> 16) + //fmt.Printf("chunklen: 0x%x Uncomp:%d, Comp:%d\n", chunkLen, uncompTotal, compTotal) + return b +} + +// Load a binary index. +// A zero value Index can be used or a previous one can be reused. +func (i *Index) Load(b []byte) ([]byte, error) { + if len(b) <= 4+len(S2IndexHeader)+len(S2IndexTrailer) { + return b, io.ErrUnexpectedEOF + } + if b[0] != ChunkTypeIndex { + return b, ErrCorrupt + } + chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16 + b = b[4:] + + // Validate we have enough... + if len(b) < chunkLen { + return b, io.ErrUnexpectedEOF + } + if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) { + return b, ErrUnsupported + } + b = b[len(S2IndexHeader):] + + // Total Uncompressed + if v, n := binary.Varint(b); n <= 0 || v < 0 { + return b, ErrCorrupt + } else { + i.TotalUncompressed = v + b = b[n:] + } + + // Total Compressed + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + i.TotalCompressed = v + b = b[n:] + } + + // Read EstBlockUncomp + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + if v < 0 { + return b, ErrCorrupt + } + i.estBlockUncomp = v + b = b[n:] + } + + var entries int + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + if v < 0 || v > maxIndexEntries { + return b, ErrCorrupt + } + entries = int(v) + b = b[n:] + } + if cap(i.info) < entries { + i.allocInfos(entries) + } + i.info = i.info[:entries] + + if len(b) < 1 { + return b, io.ErrUnexpectedEOF + } + hasUncompressed := b[0] + b = b[1:] + if hasUncompressed&1 != hasUncompressed { + return b, ErrCorrupt + } + + // Add each uncompressed entry + for idx := range i.info { + var uOff int64 + if hasUncompressed != 0 { + // Load delta + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + uOff = v + b = b[n:] + } + } + + if idx > 0 { + prev := i.info[idx-1].uncompressedOffset + uOff += prev + (i.estBlockUncomp) + if uOff <= prev { + return b, ErrCorrupt + } + } + if uOff < 0 { + return b, ErrCorrupt + } + i.info[idx].uncompressedOffset = uOff + } + + // Initial compressed size estimate. + cPredict := i.estBlockUncomp / 2 + + // Add each compressed entry + for idx := range i.info { + var cOff int64 + if v, n := binary.Varint(b); n <= 0 { + return b, ErrCorrupt + } else { + cOff = v + b = b[n:] + } + + if idx > 0 { + // Update compressed size prediction, with half the error. + cPredictNew := cPredict + cOff/2 + + prev := i.info[idx-1].compressedOffset + cOff += prev + cPredict + if cOff <= prev { + return b, ErrCorrupt + } + cPredict = cPredictNew + } + if cOff < 0 { + return b, ErrCorrupt + } + i.info[idx].compressedOffset = cOff + } + if len(b) < 4+len(S2IndexTrailer) { + return b, io.ErrUnexpectedEOF + } + // Skip size... 
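+	// (appendTo stores the total length as a fixed 4-byte little-endian
+	// value just before the trailer)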
+ b = b[4:] + + // Check trailer... + if !bytes.Equal(b[:len(S2IndexTrailer)], []byte(S2IndexTrailer)) { + return b, ErrCorrupt + } + return b[len(S2IndexTrailer):], nil +} + +// LoadStream will load an index from the end of the supplied stream. +// ErrUnsupported will be returned if the signature cannot be found. +// ErrCorrupt will be returned if unexpected values are found. +// io.ErrUnexpectedEOF is returned if there are too few bytes. +// IO errors are returned as-is. +func (i *Index) LoadStream(rs io.ReadSeeker) error { + // Go to end. + _, err := rs.Seek(-10, io.SeekEnd) + if err != nil { + return err + } + var tmp [10]byte + _, err = io.ReadFull(rs, tmp[:]) + if err != nil { + return err + } + // Check trailer... + if !bytes.Equal(tmp[4:4+len(S2IndexTrailer)], []byte(S2IndexTrailer)) { + return ErrUnsupported + } + sz := binary.LittleEndian.Uint32(tmp[:4]) + if sz > maxChunkSize+skippableFrameHeader { + return ErrCorrupt + } + _, err = rs.Seek(-int64(sz), io.SeekEnd) + if err != nil { + return err + } + + // Read index. + buf := make([]byte, sz) + _, err = io.ReadFull(rs, buf) + if err != nil { + return err + } + _, err = i.Load(buf) + return err +} + +// IndexStream will return an index for a stream. +// The stream structure will be checked, but +// data within blocks is not verified. +// The returned index can either be appended to the end of the stream +// or stored separately. +func IndexStream(r io.Reader) ([]byte, error) { + var i Index + var buf [maxChunkSize]byte + var readHeader bool + for { + _, err := io.ReadFull(r, buf[:4]) + if err != nil { + if err == io.EOF { + return i.appendTo(nil, i.TotalUncompressed, i.TotalCompressed), nil + } + return nil, err + } + // Start of this chunk. + startChunk := i.TotalCompressed + i.TotalCompressed += 4 + + chunkType := buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + return nil, ErrCorrupt + } + readHeader = true + } + chunkLen := int(buf[1]) | int(buf[2])<<8 | int(buf[3])<<16 + if chunkLen < checksumSize { + return nil, ErrCorrupt + } + + i.TotalCompressed += int64(chunkLen) + _, err = io.ReadFull(r, buf[:chunkLen]) + if err != nil { + return nil, io.ErrUnexpectedEOF + } + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + // Skip checksum. + dLen, err := DecodedLen(buf[checksumSize:]) + if err != nil { + return nil, err + } + if dLen > maxBlockSize { + return nil, ErrCorrupt + } + if i.estBlockUncomp == 0 { + // Use first block for estimate... + i.estBlockUncomp = int64(dLen) + } + err = i.add(startChunk, i.TotalUncompressed) + if err != nil { + return nil, err + } + i.TotalUncompressed += int64(dLen) + continue + case chunkTypeUncompressedData: + n2 := chunkLen - checksumSize + if n2 > maxBlockSize { + return nil, ErrCorrupt + } + if i.estBlockUncomp == 0 { + // Use first block for estimate... + i.estBlockUncomp = int64(n2) + } + err = i.add(startChunk, i.TotalUncompressed) + if err != nil { + return nil, err + } + i.TotalUncompressed += int64(n2) + continue + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + return nil, ErrCorrupt + } + + if string(buf[:len(magicBody)]) != magicBody { + if string(buf[:len(magicBody)]) != magicBodySnappy { + return nil, ErrCorrupt + } + } + + continue + } + + if chunkType <= 0x7f { + // Section 4.5. 
Reserved unskippable chunks (chunk types 0x02-0x7f). + return nil, ErrUnsupported + } + if chunkLen > maxChunkSize { + return nil, ErrUnsupported + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + } +} + +// JSON returns the index as JSON text. +func (i *Index) JSON() []byte { + x := struct { + TotalUncompressed int64 `json:"total_uncompressed"` // Total Uncompressed size if known. Will be -1 if unknown. + TotalCompressed int64 `json:"total_compressed"` // Total Compressed size if known. Will be -1 if unknown. + Offsets []struct { + CompressedOffset int64 `json:"compressed"` + UncompressedOffset int64 `json:"uncompressed"` + } `json:"offsets"` + EstBlockUncomp int64 `json:"est_block_uncompressed"` + }{ + TotalUncompressed: i.TotalUncompressed, + TotalCompressed: i.TotalCompressed, + EstBlockUncomp: i.estBlockUncomp, + } + for _, v := range i.info { + x.Offsets = append(x.Offsets, struct { + CompressedOffset int64 `json:"compressed"` + UncompressedOffset int64 `json:"uncompressed"` + }{CompressedOffset: v.compressedOffset, UncompressedOffset: v.uncompressedOffset}) + } + b, _ := json.MarshalIndent(x, "", " ") + return b +} + +// RemoveIndexHeaders will trim all headers and trailers from a given index. +// This is expected to save 20 bytes. +// These can be restored using RestoreIndexHeaders. +// This removes a layer of security, but is the most compact representation. +// Returns nil if headers contains errors. +// The returned slice references the provided slice. +func RemoveIndexHeaders(b []byte) []byte { + const save = 4 + len(S2IndexHeader) + len(S2IndexTrailer) + 4 + if len(b) <= save { + return nil + } + if b[0] != ChunkTypeIndex { + return nil + } + chunkLen := int(b[1]) | int(b[2])<<8 | int(b[3])<<16 + b = b[4:] + + // Validate we have enough... + if len(b) < chunkLen { + return nil + } + b = b[:chunkLen] + + if !bytes.Equal(b[:len(S2IndexHeader)], []byte(S2IndexHeader)) { + return nil + } + b = b[len(S2IndexHeader):] + if !bytes.HasSuffix(b, []byte(S2IndexTrailer)) { + return nil + } + b = bytes.TrimSuffix(b, []byte(S2IndexTrailer)) + + if len(b) < 4 { + return nil + } + return b[:len(b)-4] +} + +// RestoreIndexHeaders will index restore headers removed by RemoveIndexHeaders. +// No error checking is performed on the input. +// If a 0 length slice is sent, it is returned without modification. +func RestoreIndexHeaders(in []byte) []byte { + if len(in) == 0 { + return in + } + b := make([]byte, 0, 4+len(S2IndexHeader)+len(in)+len(S2IndexTrailer)+4) + b = append(b, ChunkTypeIndex, 0, 0, 0) + b = append(b, []byte(S2IndexHeader)...) + b = append(b, in...) + + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(len(b)+4+len(S2IndexTrailer))) + b = append(b, tmp[:4]...) + // Trailer + b = append(b, []byte(S2IndexTrailer)...) + + chunkLen := len(b) - skippableFrameHeader + b[1] = uint8(chunkLen >> 0) + b[2] = uint8(chunkLen >> 8) + b[3] = uint8(chunkLen >> 16) + return b +} diff --git a/vendor/github.com/klauspost/compress/s2/lz4convert.go b/vendor/github.com/klauspost/compress/s2/lz4convert.go new file mode 100644 index 00000000000..46ed908e3c0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/lz4convert.go @@ -0,0 +1,585 @@ +// Copyright (c) 2022 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
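+
+// A minimal usage sketch (illustration only, assuming the standard S2
+// block framing of a uvarint uncompressed length before the payload,
+// which ConvertBlock deliberately omits):
+//
+//	// Token 0xB0 = 11 literals, no match: a valid final LZ4 sequence.
+//	lz4Block := append([]byte{0xB0}, []byte("hello world")...)
+//	var conv s2.LZ4Converter
+//	// dst must have capacity for the entire converted block.
+//	out, sz, err := conv.ConvertBlock(make([]byte, 0, 64), lz4Block)
+//	if err != nil {
+//		panic(err)
+//	}
+//	// Re-add the uncompressed length so s2.Decode accepts the block.
+//	var tmp [binary.MaxVarintLen64]byte
+//	n := binary.PutUvarint(tmp[:], uint64(sz))
+//	block := append(tmp[:n], out...)
+//	dec, err := s2.Decode(nil, block)
+//	fmt.Println(string(dec), err) // hello world <nil>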
+ +package s2 + +import ( + "encoding/binary" + "errors" + "fmt" +) + +// LZ4Converter provides conversion from LZ4 blocks as defined here: +// https://github.com/lz4/lz4/blob/dev/doc/lz4_Block_format.md +type LZ4Converter struct { +} + +// ErrDstTooSmall is returned when provided destination is too small. +var ErrDstTooSmall = errors.New("s2: destination too small") + +// ConvertBlock will convert an LZ4 block and append it as an S2 +// block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. +func (l *LZ4Converter) ConvertBlock(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const inline = true + const lz4MinMatch = 4 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + if !debug && hasAmd64Asm { + res, sz := cvtLZ4BlockAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var lastOffset uint16 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return dst[:d], 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return dst[:d], 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... 
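+	// The final LZ4 sequence carries no match, so its token's match nibble
+	// is zero (ml == lz4MinMatch) and the input ends right after its
+	// literals.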
+ if s == len(src) && ml == lz4MinMatch { + break + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if offset == lastOffset { + if debug { + fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitRepeat16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + d += 2 + break + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + d += 2 + break + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + d += 3 + break + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + d += 4 + break + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + d += 5 + emitRepeat16(dst[5:], offset, left) + break + } + d += 5 + break + } + } + } else { + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitCopy16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + d += off + emitRepeat16(dst[off:], offset, length) + break + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. 
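+					// (copy-1 packs offset>>8 into bits 5-7 and length-4
+					// into bits 2-4 of the tag byte)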
+ dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + } + lastOffset = offset + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} + +// ConvertBlockSnappy will convert an LZ4 block and append it +// as a Snappy block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. +func (l *LZ4Converter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const lz4MinMatch = 4 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + // Use assembly when possible + if !debug && hasAmd64Asm { + res, sz := cvtLZ4BlockSnappyAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return nil, 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if s == len(src) && ml == lz4MinMatch { + break + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + length := ml + // d += emitCopyNoRepeat(dst[d:], int(offset), ml) + for length > 0 { + if d >= dLimit { + return nil, 0, ErrDstTooSmall + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit a length 64 copy, encoded as 3 bytes. 
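+			// (copy-2: tag byte 63<<2|tagCopy2, then the offset as 16-bit
+			// little-endian)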
+ dst[d+2] = uint8(offset >> 8) + dst[d+1] = uint8(offset) + dst[d+0] = 63<<2 | tagCopy2 + length -= 64 + d += 3 + continue + } + if length >= 12 || offset >= 2048 || length < 4 { + // Emit the remaining copy, encoded as 3 bytes. + dst[d+2] = uint8(offset >> 8) + dst[d+1] = uint8(offset) + dst[d+0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. + dst[d+1] = uint8(offset) + dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} + +// emitRepeat writes a repeat chunk and returns the number of bytes written. +// Length must be at least 4 and < 1<<24 +func emitRepeat16(dst []byte, offset uint16, length int) int { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + return 2 + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + return 2 + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + return 3 + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + return 4 + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + return 5 + emitRepeat16(dst[5:], offset, left) + } + return 5 +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= math.MaxUint16 +// 4 <= length && length <= math.MaxUint32 +func emitCopy16(dst []byte, offset uint16, length int) int { + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + return off + emitRepeat16(dst[off:], offset, length) + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + return 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + return 2 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
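+// Literal runs of up to 60 bytes encode entirely in the tag byte; longer
+// runs store length-1 in one to four extra bytes.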
+// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 0 <= len(lit) && len(lit) <= math.MaxUint32 +func emitLiteralGo(dst, lit []byte) int { + if len(lit) == 0 { + return 0 + } + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[1] = uint8(n) + dst[0] = 60<<2 | tagLiteral + i = 2 + case n < 1<<16: + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 61<<2 | tagLiteral + i = 3 + case n < 1<<24: + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 62<<2 | tagLiteral + i = 4 + default: + dst[4] = uint8(n >> 24) + dst[3] = uint8(n >> 16) + dst[2] = uint8(n >> 8) + dst[1] = uint8(n) + dst[0] = 63<<2 | tagLiteral + i = 5 + } + return i + copy(dst[i:], lit) +} diff --git a/vendor/github.com/klauspost/compress/s2/lz4sconvert.go b/vendor/github.com/klauspost/compress/s2/lz4sconvert.go new file mode 100644 index 00000000000..000f39719c5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/lz4sconvert.go @@ -0,0 +1,467 @@ +// Copyright (c) 2022 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "encoding/binary" + "fmt" +) + +// LZ4sConverter provides conversion from LZ4s. +// (Intel modified LZ4 Blocks) +// https://cdrdv2-public.intel.com/743912/743912-qat-programmers-guide-v2.0.pdf +// LZ4s is a variant of LZ4 block format. LZ4s should be considered as an intermediate compressed block format. +// The LZ4s format is selected when the application sets the compType to CPA_DC_LZ4S in CpaDcSessionSetupData. +// The LZ4s block returned by the Intel® QAT hardware can be used by an external +// software post-processing to generate other compressed data formats. +// The following table lists the differences between LZ4 and LZ4s block format. LZ4s block format uses +// the same high-level formatting as LZ4 block format with the following encoding changes: +// For Min Match of 4 bytes, Copy length value 1-15 means length 4-18 with 18 bytes adding an extra byte. +// ONLY "Min match of 4 bytes" is supported. +type LZ4sConverter struct { +} + +// ConvertBlock will convert an LZ4s block and append it as an S2 +// block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. 
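Before the implementation, a hedged usage sketch of the converter: the destination must be pre-sized to hold the whole converted block, and sizing it from a known uncompressed size via s2.MaxEncodedLen is one workable estimate. The input block and the assumed size here are hypothetical placeholders:

package main

import (
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	// lz4sBlock would come from an LZ4s producer such as Intel QAT hardware;
	// an empty placeholder is used here purely to show the call shape.
	var lz4sBlock []byte

	// The converter appends to dst and needs capacity for the entire
	// converted block; sizing from a known uncompressed size is one option.
	const assumedUncompressed = 1 << 20
	dst := make([]byte, 0, s2.MaxEncodedLen(assumedUncompressed))

	var conv s2.LZ4sConverter
	out, n, err := conv.ConvertBlock(dst, lz4sBlock)
	if err != nil {
		fmt.Println("convert failed:", err)
		return
	}
	fmt.Printf("converted %d uncompressed bytes into %d S2 bytes\n", n, len(out))
}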
+func (l *LZ4sConverter) ConvertBlock(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const inline = true + const lz4MinMatch = 3 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + if !debug && hasAmd64Asm { + res, sz := cvtLZ4sBlockAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var lastOffset uint16 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return dst[:d], 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return dst[:d], 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if ml == lz4MinMatch { + if s == len(src) { + break + } + // 0 bytes. 
+ continue + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if offset == lastOffset { + if debug { + fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitRepeat16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Repeat offset, make length cheaper + length -= 4 + if length <= 4 { + dst[0] = uint8(length)<<2 | tagCopy1 + dst[1] = 0 + d += 2 + break + } + if length < 8 && offset < 2048 { + // Encode WITH offset + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1 + d += 2 + break + } + if length < (1<<8)+4 { + length -= 4 + dst[2] = uint8(length) + dst[1] = 0 + dst[0] = 5<<2 | tagCopy1 + d += 3 + break + } + if length < (1<<16)+(1<<8) { + length -= 1 << 8 + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 6<<2 | tagCopy1 + d += 4 + break + } + const maxRepeat = (1 << 24) - 1 + length -= 1 << 16 + left := 0 + if length > maxRepeat { + left = length - maxRepeat + 4 + length = maxRepeat - 4 + } + dst[4] = uint8(length >> 16) + dst[3] = uint8(length >> 8) + dst[2] = uint8(length >> 0) + dst[1] = 0 + dst[0] = 7<<2 | tagCopy1 + if left > 0 { + d += 5 + emitRepeat16(dst[5:], offset, left) + break + } + d += 5 + break + } + } + } else { + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + if !inline { + d += emitCopy16(dst[d:], offset, ml) + } else { + length := ml + dst := dst[d:] + for len(dst) > 5 { + // Offset no more than 2 bytes. + if length > 64 { + off := 3 + if offset < 2048 { + // emit 8 bytes as tagCopy1, rest as repeats. + dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1 + length -= 8 + off = 2 + } else { + // Emit a length 60 copy, encoded as 3 bytes. + // Emit remaining as repeat value (minimum 4 bytes). + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = 59<<2 | tagCopy2 + length -= 60 + } + // Emit remaining as repeats, at least 4 bytes remain. + d += off + emitRepeat16(dst[off:], offset, length) + break + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[2] = uint8(offset >> 8) + dst[1] = uint8(offset) + dst[0] = uint8(length-1)<<2 | tagCopy2 + d += 3 + break + } + // Emit the remaining copy, encoded as 2 bytes. 
+ dst[1] = uint8(offset) + dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + d += 2 + break + } + } + lastOffset = offset + } + uncompressed += ml + if d > dLimit { + return nil, 0, ErrDstTooSmall + } + } + + return dst[:d], uncompressed, nil +} + +// ConvertBlockSnappy will convert an LZ4s block and append it +// as a Snappy block without block length to dst. +// The uncompressed size is returned as well. +// dst must have capacity to contain the entire compressed block. +func (l *LZ4sConverter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) { + if len(src) == 0 { + return dst, 0, nil + } + const debug = false + const lz4MinMatch = 3 + + s, d := 0, len(dst) + dst = dst[:cap(dst)] + // Use assembly when possible + if !debug && hasAmd64Asm { + res, sz := cvtLZ4sBlockSnappyAsm(dst[d:], src) + if res < 0 { + const ( + errCorrupt = -1 + errDstTooSmall = -2 + ) + switch res { + case errCorrupt: + return nil, 0, ErrCorrupt + case errDstTooSmall: + return nil, 0, ErrDstTooSmall + default: + return nil, 0, fmt.Errorf("unexpected result: %d", res) + } + } + if d+sz > len(dst) { + return nil, 0, ErrDstTooSmall + } + return dst[:d+sz], res, nil + } + + dLimit := len(dst) - 10 + var uncompressed int + if debug { + fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst)) + } + + for { + if s >= len(src) { + return nil, 0, ErrCorrupt + } + // Read literal info + token := src[s] + ll := int(token >> 4) + ml := int(lz4MinMatch + (token & 0xf)) + + // If upper nibble is 15, literal length is extended + if token >= 0xf0 { + for { + s++ + if s >= len(src) { + if debug { + fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + ll += int(val) + if val != 255 { + break + } + } + } + // Skip past token + if s+ll >= len(src) { + if debug { + fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src)) + } + return nil, 0, ErrCorrupt + } + s++ + if ll > 0 { + if d+ll > dLimit { + return nil, 0, ErrDstTooSmall + } + if debug { + fmt.Printf("emit %d literals\n", ll) + } + d += emitLiteralGo(dst[d:], src[s:s+ll]) + s += ll + uncompressed += ll + } + + // Check if we are done... + if ml == lz4MinMatch { + if s == len(src) { + break + } + // 0 bytes. + continue + } + // 2 byte offset + if s >= len(src)-2 { + if debug { + fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2) + } + return nil, 0, ErrCorrupt + } + offset := binary.LittleEndian.Uint16(src[s:]) + s += 2 + if offset == 0 { + if debug { + fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s) + } + return nil, 0, ErrCorrupt + } + if int(offset) > uncompressed { + if debug { + fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed) + } + return nil, 0, ErrCorrupt + } + + if ml == lz4MinMatch+15 { + for { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + val := src[s] + s++ + ml += int(val) + if val != 255 { + if s >= len(src) { + if debug { + fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src)) + } + return nil, 0, ErrCorrupt + } + break + } + } + } + if debug { + fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset) + } + length := ml + // d += emitCopyNoRepeat(dst[d:], int(offset), ml) + for length > 0 { + if d >= dLimit { + return nil, 0, ErrDstTooSmall + } + + // Offset no more than 2 bytes. + if length > 64 { + // Emit a length 64 copy, encoded as 3 bytes. 
+			dst[d+2] = uint8(offset >> 8)
+			dst[d+1] = uint8(offset)
+			dst[d+0] = 63<<2 | tagCopy2
+			length -= 64
+			d += 3
+			continue
+		}
+		if length >= 12 || offset >= 2048 || length < 4 {
+			// Emit the remaining copy, encoded as 3 bytes.
+			dst[d+2] = uint8(offset >> 8)
+			dst[d+1] = uint8(offset)
+			dst[d+0] = uint8(length-1)<<2 | tagCopy2
+			d += 3
+			break
+		}
+		// Emit the remaining copy, encoded as 2 bytes.
+		dst[d+1] = uint8(offset)
+		dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+		d += 2
+		break
+		}
+		uncompressed += ml
+		if d > dLimit {
+			return nil, 0, ErrDstTooSmall
+		}
+	}
+
+	return dst[:d], uncompressed, nil
+}
diff --git a/vendor/github.com/klauspost/compress/s2/reader.go b/vendor/github.com/klauspost/compress/s2/reader.go
new file mode 100644
index 00000000000..2f01a3987fe
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/reader.go
@@ -0,0 +1,1062 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019+ Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"runtime"
+	"sync"
+)
+
+// ErrCantSeek is returned if the stream cannot be seeked.
+type ErrCantSeek struct {
+	Reason string
+}
+
+// Error returns the error as a string.
+func (e ErrCantSeek) Error() string {
+	return fmt.Sprintf("s2: Can't seek because %s", e.Reason)
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt with S2 changes.
+func NewReader(r io.Reader, opts ...ReaderOption) *Reader {
+	nr := Reader{
+		r:        r,
+		maxBlock: maxBlockSize,
+	}
+	for _, opt := range opts {
+		if err := opt(&nr); err != nil {
+			nr.err = err
+			return &nr
+		}
+	}
+	nr.maxBufSize = MaxEncodedLen(nr.maxBlock) + checksumSize
+	if nr.lazyBuf > 0 {
+		nr.buf = make([]byte, MaxEncodedLen(nr.lazyBuf)+checksumSize)
+	} else {
+		nr.buf = make([]byte, MaxEncodedLen(defaultBlockSize)+checksumSize)
+	}
+	nr.readHeader = nr.ignoreStreamID
+	nr.paramsOK = true
+	return &nr
+}
+
+// ReaderOption is an option for creating a decoder.
+type ReaderOption func(*Reader) error
+
+// ReaderMaxBlockSize allows controlling allocations if the stream
+// has been compressed with a smaller WriterBlockSize, or with the default 1MB.
+// Blocks must be this size or smaller to decompress,
+// otherwise the decoder will return ErrUnsupported.
+//
+// For streams compressed with Snappy this can safely be set to 64KB (64 << 10).
+//
+// Default is the maximum limit of 4MB.
+func ReaderMaxBlockSize(blockSize int) ReaderOption {
+	return func(r *Reader) error {
+		if blockSize > maxBlockSize || blockSize <= 0 {
+			return errors.New("s2: block size too large. Must be <= 4MB and > 0")
+		}
+		if r.lazyBuf == 0 && blockSize < defaultBlockSize {
+			r.lazyBuf = blockSize
+		}
+		r.maxBlock = blockSize
+		return nil
+	}
+}
+
+// ReaderAllocBlock allows controlling upfront stream allocations
+// and not allocating for frames bigger than this initially.
+// If frames bigger than this are seen, a bigger buffer will be allocated.
+//
+// Default is 1MB, which is the default output size.
+func ReaderAllocBlock(blockSize int) ReaderOption {
+	return func(r *Reader) error {
+		if blockSize > maxBlockSize || blockSize < 1024 {
+			return errors.New("s2: invalid ReaderAllocBlock. Must be <= 4MB and >= 1024")
Must be <= 4MB and >= 1024") + } + r.lazyBuf = blockSize + return nil + } +} + +// ReaderIgnoreStreamIdentifier will make the reader skip the expected +// stream identifier at the beginning of the stream. +// This can be used when serving a stream that has been forwarded to a specific point. +func ReaderIgnoreStreamIdentifier() ReaderOption { + return func(r *Reader) error { + r.ignoreStreamID = true + return nil + } +} + +// ReaderSkippableCB will register a callback for chuncks with the specified ID. +// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive). +// For each chunk with the ID, the callback is called with the content. +// Any returned non-nil error will abort decompression. +// Only one callback per ID is supported, latest sent will be used. +func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption { + return func(r *Reader) error { + if id < 0x80 || id > 0xfd { + return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)") + } + r.skippableCB[id] = fn + return nil + } +} + +// ReaderIgnoreCRC will make the reader skip CRC calculation and checks. +func ReaderIgnoreCRC() ReaderOption { + return func(r *Reader) error { + r.ignoreCRC = true + return nil + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + skippableCB [0x80]func(r io.Reader) error + blockStart int64 // Uncompressed offset at start of current. + index *Index + + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + // maximum block size allowed. + maxBlock int + // maximum expected buffer size. + maxBufSize int + // alloc a buffer this size if > 0. + lazyBuf int + readHeader bool + paramsOK bool + snappyFrame bool + ignoreStreamID bool + ignoreCRC bool +} + +// GetBufferCapacity returns the capacity of the internal buffer. +// This might be useful to know when reusing the same reader in combination +// with the lazy buffer option. +func (r *Reader) GetBufferCapacity() int { + return cap(r.buf) +} + +// ensureBufferSize will ensure that the buffer can take at least n bytes. +// If false is returned the buffer exceeds maximum allowed size. +func (r *Reader) ensureBufferSize(n int) bool { + if n > r.maxBufSize { + r.err = ErrCorrupt + return false + } + if cap(r.buf) >= n { + return true + } + // Realloc buffer. + r.buf = make([]byte, n) + return true +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + if !r.paramsOK { + return + } + r.index = nil + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.blockStart = 0 + r.readHeader = r.ignoreStreamID +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// skippable will skip n bytes. +// If the supplied reader supports seeking that is used. +// tmp is used as a temporary buffer for reading. +// The supplied slice does not need to be the size of the read. 
+func (r *Reader) skippable(tmp []byte, n int, allowEOF bool, id uint8) (ok bool) {
+	if id < 0x80 {
+		r.err = fmt.Errorf("internal error: skippable id < 0x80")
+		return false
+	}
+	if fn := r.skippableCB[id-0x80]; fn != nil {
+		rd := io.LimitReader(r.r, int64(n))
+		r.err = fn(rd)
+		if r.err != nil {
+			return false
+		}
+		_, r.err = io.CopyBuffer(ioutil.Discard, rd, tmp)
+		return r.err == nil
+	}
+	if rs, ok := r.r.(io.ReadSeeker); ok {
+		_, err := rs.Seek(int64(n), io.SeekCurrent)
+		if err == nil {
+			return true
+		}
+		if err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+			r.err = ErrCorrupt
+			return false
+		}
+	}
+	for n > 0 {
+		if n < len(tmp) {
+			tmp = tmp[:n]
+		}
+		if _, r.err = io.ReadFull(r.r, tmp); r.err != nil {
+			if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+				r.err = ErrCorrupt
+			}
+			return false
+		}
+		n -= len(tmp)
+	}
+	return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	for {
+		if r.i < r.j {
+			n := copy(p, r.decoded[r.i:r.j])
+			r.i += n
+			return n, nil
+		}
+		if !r.readFull(r.buf[:4], true) {
+			return 0, r.err
+		}
+		chunkType := r.buf[0]
+		if !r.readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.readHeader = true
+		}
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+
+		// The chunk types are specified at
+		// https://github.com/google/snappy/blob/master/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			r.blockStart += int64(r.j)
+			// Section 4.2. Compressed data (chunk type 0x00).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.ensureBufferSize(chunkLen) {
+				if r.err == nil {
+					r.err = ErrUnsupported
+				}
+				return 0, r.err
+			}
+			buf := r.buf[:chunkLen]
+			if !r.readFull(buf, false) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			buf = buf[checksumSize:]
+
+			n, err := DecodedLen(buf)
+			if err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if r.snappyFrame && n > maxSnappyBlockSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+
+			if n > len(r.decoded) {
+				if n > r.maxBlock {
+					r.err = ErrCorrupt
+					return 0, r.err
+				}
+				r.decoded = make([]byte, n)
+			}
+			if _, err := Decode(r.decoded, buf); err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
+				r.err = ErrCRC
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeUncompressedData:
+			r.blockStart += int64(r.j)
+			// Section 4.3. Uncompressed data (chunk type 0x01).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.ensureBufferSize(chunkLen) {
+				if r.err == nil {
+					r.err = ErrUnsupported
+				}
+				return 0, r.err
+			}
+			buf := r.buf[:checksumSize]
+			if !r.readFull(buf, false) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read directly into r.decoded instead of via r.buf.
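For reference, the 4-byte chunk header parsed at the top of this loop is one type byte followed by a 24-bit little-endian body length. A standalone sketch of that layout (not part of the patch):

package main

import "fmt"

// parseChunkHeader splits an S2/Snappy frame chunk header into its
// chunk type and 24-bit little-endian body length.
func parseChunkHeader(h [4]byte) (chunkType uint8, chunkLen int) {
	return h[0], int(h[1]) | int(h[2])<<8 | int(h[3])<<16
}

func main() {
	// 0x00 = compressed data chunk, body length 0x012345.
	typ, n := parseChunkHeader([4]byte{0x00, 0x45, 0x23, 0x01})
	fmt.Printf("type=%#02x len=%d\n", typ, n) // type=0x00 len=74565
}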
+			n := chunkLen - checksumSize
+			if r.snappyFrame && n > maxSnappyBlockSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if n > len(r.decoded) {
+				if n > r.maxBlock {
+					r.err = ErrCorrupt
+					return 0, r.err
+				}
+				r.decoded = make([]byte, n)
+			}
+			if !r.readFull(r.decoded[:n], false) {
+				return 0, r.err
+			}
+			if !r.ignoreCRC && crc(r.decoded[:n]) != checksum {
+				r.err = ErrCRC
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeStreamIdentifier:
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(magicBody) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.buf[:len(magicBody)], false) {
+				return 0, r.err
+			}
+			if string(r.buf[:len(magicBody)]) != magicBody {
+				if string(r.buf[:len(magicBody)]) != magicBodySnappy {
+					r.err = ErrCorrupt
+					return 0, r.err
+				} else {
+					r.snappyFrame = true
+				}
+			} else {
+				r.snappyFrame = false
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			// fmt.Printf("ERR chunktype: 0x%x\n", chunkType)
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+		// Section 4.4 Padding (chunk type 0xfe).
+		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+		if chunkLen > maxChunkSize {
+			// fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen)
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+
+		// fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen)
+		if !r.skippable(r.buf, chunkLen, false, chunkType) {
+			return 0, r.err
+		}
+	}
+}
+
+// DecodeConcurrent will decode the full stream to w.
+// This function should not be combined with reading, seeking or other operations.
+// Up to 'concurrent' goroutines will be used.
+// If <= 0, runtime.NumCPU will be used.
+// On success, the number of bytes decompressed and a nil error is returned.
+// This is mainly intended for bigger streams.
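A minimal usage sketch of DecodeConcurrent as documented above (the file names are hypothetical):

package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/s2"
)

func main() {
	in, err := os.Open("data.s2") // hypothetical compressed input
	if err != nil {
		panic(err)
	}
	defer in.Close()
	out, err := os.Create("data.raw")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// 0 lets the decoder pick runtime.NumCPU() goroutines.
	n, err := s2.NewReader(in).DecodeConcurrent(out, 0)
	fmt.Printf("decompressed %d bytes, err=%v\n", n, err)
}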
+func (r *Reader) DecodeConcurrent(w io.Writer, concurrent int) (written int64, err error) { + if r.i > 0 || r.j > 0 || r.blockStart > 0 { + return 0, errors.New("DecodeConcurrent called after ") + } + if concurrent <= 0 { + concurrent = runtime.NumCPU() + } + + // Write to output + var errMu sync.Mutex + var aErr error + setErr := func(e error) (ok bool) { + errMu.Lock() + defer errMu.Unlock() + if e == nil { + return aErr == nil + } + if aErr == nil { + aErr = e + } + return false + } + hasErr := func() (ok bool) { + errMu.Lock() + v := aErr != nil + errMu.Unlock() + return v + } + + var aWritten int64 + toRead := make(chan []byte, concurrent) + writtenBlocks := make(chan []byte, concurrent) + queue := make(chan chan []byte, concurrent) + reUse := make(chan chan []byte, concurrent) + for i := 0; i < concurrent; i++ { + toRead <- make([]byte, 0, r.maxBufSize) + writtenBlocks <- make([]byte, 0, r.maxBufSize) + reUse <- make(chan []byte, 1) + } + // Writer + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for toWrite := range queue { + entry := <-toWrite + reUse <- toWrite + if hasErr() { + writtenBlocks <- entry + continue + } + n, err := w.Write(entry) + want := len(entry) + writtenBlocks <- entry + if err != nil { + setErr(err) + continue + } + if n != want { + setErr(io.ErrShortWrite) + continue + } + aWritten += int64(n) + } + }() + + // Reader + defer func() { + close(queue) + if r.err != nil { + err = r.err + setErr(r.err) + } + wg.Wait() + if err == nil { + err = aErr + } + written = aWritten + }() + + for !hasErr() { + if !r.readFull(r.buf[:4], true) { + if r.err == io.EOF { + r.err = nil + } + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + r.blockStart += int64(r.j) + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if chunkLen > r.maxBufSize { + r.err = ErrCorrupt + return 0, r.err + } + orgBuf := <-toRead + buf := orgBuf[:chunkLen] + + if !r.readFull(buf, false) { + return 0, r.err + } + + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if r.snappyFrame && n > maxSnappyBlockSize { + r.err = ErrCorrupt + return 0, r.err + } + + if n > r.maxBlock { + r.err = ErrCorrupt + return 0, r.err + } + wg.Add(1) + + decoded := <-writtenBlocks + entry := <-reUse + queue <- entry + go func() { + defer wg.Done() + decoded = decoded[:n] + _, err := Decode(decoded, buf) + toRead <- orgBuf + if err != nil { + writtenBlocks <- decoded + setErr(err) + return + } + if !r.ignoreCRC && crc(decoded) != checksum { + writtenBlocks <- decoded + setErr(ErrCRC) + return + } + entry <- decoded + }() + continue + + case chunkTypeUncompressedData: + + // Section 4.3. Uncompressed data (chunk type 0x01). 
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + if chunkLen > r.maxBufSize { + r.err = ErrCorrupt + return 0, r.err + } + // Grab write buffer + orgBuf := <-writtenBlocks + buf := orgBuf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read content. + n := chunkLen - checksumSize + + if r.snappyFrame && n > maxSnappyBlockSize { + r.err = ErrCorrupt + return 0, r.err + } + if n > r.maxBlock { + r.err = ErrCorrupt + return 0, r.err + } + // Read uncompressed + buf = orgBuf[:n] + if !r.readFull(buf, false) { + return 0, r.err + } + + if !r.ignoreCRC && crc(buf) != checksum { + r.err = ErrCRC + return 0, r.err + } + entry := <-reUse + queue <- entry + entry <- buf + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + if string(r.buf[:len(magicBody)]) != magicBody { + if string(r.buf[:len(magicBody)]) != magicBodySnappy { + r.err = ErrCorrupt + return 0, r.err + } else { + r.snappyFrame = true + } + } else { + r.snappyFrame = false + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + // fmt.Printf("ERR chunktype: 0x%x\n", chunkType) + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if chunkLen > maxChunkSize { + // fmt.Printf("ERR chunkLen: 0x%x\n", chunkLen) + r.err = ErrUnsupported + return 0, r.err + } + + // fmt.Printf("skippable: ID: 0x%x, len: 0x%x\n", chunkType, chunkLen) + if !r.skippable(r.buf, chunkLen, false, chunkType) { + return 0, r.err + } + } + return 0, r.err +} + +// Skip will skip n bytes forward in the decompressed output. +// For larger skips this consumes less CPU and is faster than reading output and discarding it. +// CRC is not checked on skipped blocks. +// io.ErrUnexpectedEOF is returned if the stream ends before all bytes have been skipped. +// If a decoding error is encountered subsequent calls to Read will also fail. +func (r *Reader) Skip(n int64) error { + if n < 0 { + return errors.New("attempted negative skip") + } + if r.err != nil { + return r.err + } + + for n > 0 { + if r.i < r.j { + // Skip in buffer. + // decoded[i:j] contains decoded bytes that have not yet been passed on. + left := int64(r.j - r.i) + if left >= n { + tmp := int64(r.i) + n + if tmp > math.MaxInt32 { + return errors.New("s2: internal overflow in skip") + } + r.i = int(tmp) + return nil + } + n -= int64(r.j - r.i) + r.i = r.j + } + + // Buffer empty; read blocks until we have content. + if !r.readFull(r.buf[:4], true) { + if r.err == io.EOF { + r.err = io.ErrUnexpectedEOF + } + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + r.blockStart += int64(r.j) + // Section 4.2. Compressed data (chunk type 0x00). 
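Skip, implemented above, advances the uncompressed position without surfacing the skipped bytes, which is cheaper than reading and discarding. A short sketch:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/s2"
)

func main() {
	var buf bytes.Buffer
	w := s2.NewWriter(&buf)
	_, _ = w.Write(bytes.Repeat([]byte("0123456789"), 1000))
	_ = w.Close()

	r := s2.NewReader(&buf)
	// Jump 9000 uncompressed bytes forward, then read the tail.
	if err := r.Skip(9000); err != nil {
		panic(err)
	}
	rest, _ := io.ReadAll(r)
	fmt.Println(len(rest)) // 1000
}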
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err == nil { + r.err = ErrUnsupported + } + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + dLen, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if dLen > r.maxBlock { + r.err = ErrCorrupt + return r.err + } + // Check if destination is within this block + if int64(dLen) > n { + if len(r.decoded) < dLen { + r.decoded = make([]byte, dLen) + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:dLen]) != checksum { + r.err = ErrCorrupt + return r.err + } + } else { + // Skip block completely + n -= int64(dLen) + r.blockStart += int64(dLen) + dLen = 0 + } + r.i, r.j = 0, dLen + continue + case chunkTypeUncompressedData: + r.blockStart += int64(r.j) + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + if !r.ensureBufferSize(chunkLen) { + if r.err != nil { + r.err = ErrUnsupported + } + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n2 := chunkLen - checksumSize + if n2 > len(r.decoded) { + if n2 > r.maxBlock { + r.err = ErrCorrupt + return r.err + } + r.decoded = make([]byte, n2) + } + if !r.readFull(r.decoded[:n2], false) { + return r.err + } + if int64(n2) < n { + if crc(r.decoded[:n2]) != checksum { + r.err = ErrCorrupt + return r.err + } + } + r.i, r.j = 0, n2 + continue + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + if string(r.buf[:len(magicBody)]) != magicBody { + if string(r.buf[:len(magicBody)]) != magicBodySnappy { + r.err = ErrCorrupt + return r.err + } + } + + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + if chunkLen > maxChunkSize { + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.skippable(r.buf, chunkLen, false, chunkType) { + return r.err + } + } + return nil +} + +// ReadSeeker provides random or forward seeking in compressed content. +// See Reader.ReadSeeker +type ReadSeeker struct { + *Reader + readAtMu sync.Mutex +} + +// ReadSeeker will return an io.ReadSeeker and io.ReaderAt +// compatible version of the reader. +// If 'random' is specified the returned io.Seeker can be used for +// random seeking, otherwise only forward seeking is supported. +// Enabling random seeking requires the original input to support +// the io.Seeker interface. +// A custom index can be specified which will be used if supplied. +// When using a custom index, it will not be read from the input stream. +// The ReadAt position will affect regular reads and the current position of Seek. +// So using Read after ReadAt will continue from where the ReadAt stopped. +// No functions should be used concurrently. 
+// The returned ReadSeeker contains a shallow reference to the existing Reader, +// meaning changes performed to one is reflected in the other. +func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) { + // Read index if provided. + if len(index) != 0 { + if r.index == nil { + r.index = &Index{} + } + if _, err := r.index.Load(index); err != nil { + return nil, ErrCantSeek{Reason: "loading index returned: " + err.Error()} + } + } + + // Check if input is seekable + rs, ok := r.r.(io.ReadSeeker) + if !ok { + if !random { + return &ReadSeeker{Reader: r}, nil + } + return nil, ErrCantSeek{Reason: "input stream isn't seekable"} + } + + if r.index != nil { + // Seekable and index, ok... + return &ReadSeeker{Reader: r}, nil + } + + // Load from stream. + r.index = &Index{} + + // Read current position. + pos, err := rs.Seek(0, io.SeekCurrent) + if err != nil { + return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()} + } + err = r.index.LoadStream(rs) + if err != nil { + if err == ErrUnsupported { + // If we don't require random seeking, reset input and return. + if !random { + _, err = rs.Seek(pos, io.SeekStart) + if err != nil { + return nil, ErrCantSeek{Reason: "resetting stream returned: " + err.Error()} + } + r.index = nil + return &ReadSeeker{Reader: r}, nil + } + return nil, ErrCantSeek{Reason: "input stream does not contain an index"} + } + return nil, ErrCantSeek{Reason: "reading index returned: " + err.Error()} + } + + // reset position. + _, err = rs.Seek(pos, io.SeekStart) + if err != nil { + return nil, ErrCantSeek{Reason: "seeking input returned: " + err.Error()} + } + return &ReadSeeker{Reader: r}, nil +} + +// Seek allows seeking in compressed data. +func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) { + if r.err != nil { + if !errors.Is(r.err, io.EOF) { + return 0, r.err + } + // Reset on EOF + r.err = nil + } + + // Calculate absolute offset. + absOffset := offset + + switch whence { + case io.SeekStart: + case io.SeekCurrent: + absOffset = r.blockStart + int64(r.i) + offset + case io.SeekEnd: + if r.index == nil { + return 0, ErrUnsupported + } + absOffset = r.index.TotalUncompressed + offset + default: + r.err = ErrUnsupported + return 0, r.err + } + + if absOffset < 0 { + return 0, errors.New("seek before start of file") + } + + if !r.readHeader { + // Make sure we read the header. + _, r.err = r.Read([]byte{}) + if r.err != nil { + return 0, r.err + } + } + + // If we are inside current block no need to seek. + // This includes no offset changes. + if absOffset >= r.blockStart && absOffset < r.blockStart+int64(r.j) { + r.i = int(absOffset - r.blockStart) + return r.blockStart + int64(r.i), nil + } + + rs, ok := r.r.(io.ReadSeeker) + if r.index == nil || !ok { + currOffset := r.blockStart + int64(r.i) + if absOffset >= currOffset { + err := r.Skip(absOffset - currOffset) + return r.blockStart + int64(r.i), err + } + return 0, ErrUnsupported + } + + // We can seek and we have an index. + c, u, err := r.index.Find(absOffset) + if err != nil { + return r.blockStart + int64(r.i), err + } + + // Seek to next block + _, err = rs.Seek(c, io.SeekStart) + if err != nil { + return 0, err + } + + r.i = r.j // Remove rest of current block. + r.blockStart = u - int64(r.j) // Adjust current block start for accounting. 
+	if u < absOffset {
+		// Forward inside block
+		return absOffset, r.Skip(absOffset - u)
+	}
+	if u > absOffset {
+		return 0, fmt.Errorf("s2 seek: (internal error) u (%d) > absOffset (%d)", u, absOffset)
+	}
+	return absOffset, nil
+}
+
+// ReadAt reads len(p) bytes into p starting at offset off in the
+// underlying input source. It returns the number of bytes
+// read (0 <= n <= len(p)) and any error encountered.
+//
+// When ReadAt returns n < len(p), it returns a non-nil error
+// explaining why more bytes were not returned. In this respect,
+// ReadAt is stricter than Read.
+//
+// Even if ReadAt returns n < len(p), it may use all of p as scratch
+// space during the call. If some data is available but not len(p) bytes,
+// ReadAt blocks until either all the data is available or an error occurs.
+// In this respect ReadAt is different from Read.
+//
+// If the n = len(p) bytes returned by ReadAt are at the end of the
+// input source, ReadAt may return either err == EOF or err == nil.
+//
+// If ReadAt is reading from an input source with a seek offset,
+// ReadAt should not affect nor be affected by the underlying
+// seek offset.
+//
+// Clients of ReadAt can execute parallel ReadAt calls on the
+// same input source. This is however not recommended.
+func (r *ReadSeeker) ReadAt(p []byte, offset int64) (int, error) {
+	r.readAtMu.Lock()
+	defer r.readAtMu.Unlock()
+	_, err := r.Seek(offset, io.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+	n := 0
+	for n < len(p) {
+		n2, err := r.Read(p[n:])
+		if err != nil {
+			// This will include io.EOF
+			return n + n2, err
+		}
+		n += n2
+	}
+	return n, nil
+}
+
+// ReadByte satisfies the io.ByteReader interface.
+func (r *Reader) ReadByte() (byte, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	if r.i < r.j {
+		c := r.decoded[r.i]
+		r.i++
+		return c, nil
+	}
+	var tmp [1]byte
+	for i := 0; i < 10; i++ {
+		n, err := r.Read(tmp[:])
+		if err != nil {
+			return 0, err
+		}
+		if n == 1 {
+			return tmp[0], nil
+		}
+	}
+	return 0, io.ErrNoProgress
+}
+
+// SkippableCB will register a callback for chunks with the specified ID.
+// ID must be a Reserved skippable chunks ID, 0x80-0xfe (inclusive).
+// For each chunk with the ID, the callback is called with the content.
+// Any returned non-nil error will abort decompression.
+// Only one callback per ID is supported, the latest registered will be used.
+// Sending a nil function will disable previous callbacks.
+func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error {
+	if id < 0x80 || id > chunkTypePadding {
+		return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfe (inclusive)")
+	}
+	r.skippableCB[id-0x80] = fn
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/s2/s2.go b/vendor/github.com/klauspost/compress/s2/s2.go
new file mode 100644
index 00000000000..dae3f731fab
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/s2/s2.go
@@ -0,0 +1,143 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Copyright (c) 2019 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package s2 implements the S2 compression format.
+//
+// S2 is an extension of Snappy. Like Snappy, S2 is aimed at high throughput,
+// which is why it features concurrent compression for bigger payloads.
+//
+// Decoding is compatible with Snappy compressed content,
+// but content compressed with S2 cannot be decompressed by Snappy.
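To make the block/stream distinction described in this package comment concrete, a minimal sketch of both APIs:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/s2"
)

func main() {
	src := []byte("hello hello hello hello")

	// Block format: the whole input is known up front.
	block := s2.Encode(nil, src)
	back, err := s2.Decode(nil, block)
	fmt.Println(bytes.Equal(back, src), err)

	// Stream (framing) format: incremental and CRC-checked.
	var stream bytes.Buffer
	w := s2.NewWriter(&stream)
	_, _ = w.Write(src)
	_ = w.Close()
	out, _ := io.ReadAll(s2.NewReader(&stream))
	fmt.Println(bytes.Equal(out, src))
}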
+//
+// For more information on Snappy/S2 differences see README in: https://github.com/klauspost/compress/tree/master/s2
+//
+// There are actually two S2 formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as an S2 stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// A "better" compression option is available. This will trade some compression
+// speed for a higher compression ratio.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// Blocks do not offer much data protection, so it is up to you to
+// add data validation of decompressed blocks.
+//
+// Streams perform CRC validation of the decompressed data.
+// Stream compression will also be performed on multiple CPU cores concurrently,
+// significantly improving throughput.
+package s2
+
+import (
+	"bytes"
+	"hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+  - If m < 60, the next 1 + m bytes are literal bytes.
+  - Otherwise, let n be the little-endian unsigned integer denoted by the next
+    m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+    of the offset. The next byte is bits 0-7 of the offset.
+  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+    The length is 1 + m. The offset is the little-endian unsigned integer
+    denoted by the next 2 bytes.
+  - For l == 3, the offset ranges in [0, 1<<32) and the length in
+    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+    integer denoted by the next 4 bytes.
+*/
+const (
+	tagLiteral = 0x00
+	tagCopy1   = 0x01
+	tagCopy2   = 0x02
+	tagCopy4   = 0x03
+)
+
+const (
+	checksumSize     = 4
+	chunkHeaderSize  = 4
+	magicChunk       = "\xff\x06\x00\x00" + magicBody
+	magicChunkSnappy = "\xff\x06\x00\x00" + magicBodySnappy
+	magicBodySnappy  = "sNaPpY"
+	magicBody        = "S2sTwO"
+
+	// maxBlockSize is the maximum size of the input to encodeBlock.
+	//
+	// For the framing format (Writer type instead of Encode function),
+	// this is the maximum uncompressed size of a block.
+	maxBlockSize = 4 << 20
+
+	// minBlockSize is the minimum size of block setting when creating a writer.
+	minBlockSize = 4 << 10
+
+	skippableFrameHeader = 4
+	maxChunkSize         = 1<<24 - 1 // 16777215
+
+	// Default block size
+	defaultBlockSize = 1 << 20
+
+	// maxSnappyBlockSize is the maximum snappy block size.
+ maxSnappyBlockSize = 1 << 16 + + obufHeaderLen = checksumSize + chunkHeaderSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + ChunkTypeIndex = 0x99 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// literalExtraSize returns the extra size of encoding n literals. +// n should be >= 0 and <= math.MaxUint32. +func literalExtraSize(n int64) int64 { + if n == 0 { + return 0 + } + switch { + case n < 60: + return 1 + case n < 1<<8: + return 2 + case n < 1<<16: + return 3 + case n < 1<<24: + return 4 + default: + return 5 + } +} + +type byter interface { + Bytes() []byte +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go new file mode 100644 index 00000000000..089cd36d8cb --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2/writer.go @@ -0,0 +1,1020 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Copyright (c) 2019+ Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package s2 + +import ( + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "io" + "runtime" + "sync" +) + +const ( + levelUncompressed = iota + 1 + levelFast + levelBetter + levelBest +) + +// NewWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// Users must call Close to guarantee all data has been forwarded to +// the underlying io.Writer and that resources are released. +// They may also call Flush zero or more times before calling Close. +func NewWriter(w io.Writer, opts ...WriterOption) *Writer { + w2 := Writer{ + blockSize: defaultBlockSize, + concurrency: runtime.GOMAXPROCS(0), + randSrc: rand.Reader, + level: levelFast, + } + for _, opt := range opts { + if err := opt(&w2); err != nil { + w2.errState = err + return &w2 + } + } + w2.obufLen = obufHeaderLen + MaxEncodedLen(w2.blockSize) + w2.paramsOK = true + w2.ibuf = make([]byte, 0, w2.blockSize) + w2.buffers.New = func() interface{} { + return make([]byte, w2.obufLen) + } + w2.Reset(w) + return &w2 +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + errMu sync.Mutex + errState error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + ibuf []byte + + blockSize int + obufLen int + concurrency int + written int64 + uncompWritten int64 // Bytes sent to compression + output chan chan result + buffers sync.Pool + pad int + + writer io.Writer + randSrc io.Reader + writerWg sync.WaitGroup + index Index + customEnc func(dst, src []byte) int + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool + paramsOK bool + snappy bool + flushOnWrite bool + appendIndex bool + level uint8 +} + +type result struct { + b []byte + // Uncompressed start offset + startOffset int64 +} + +// err returns the previously set error. +// If no error has been set it is set to err if not nil. 
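The crc helper defined above computes the Snappy framing checksum: a Castagnoli CRC-32, rotated right by 15 bits and offset by a constant so that checksums of data containing embedded CRCs stay well distributed. An equivalent standalone form (the explicit parentheses match the original expression, since + and | share precedence in Go and associate left):

package main

import (
	"fmt"
	"hash/crc32"
)

var crcTable = crc32.MakeTable(crc32.Castagnoli)

// maskedCRC is ((crc >> 15) | (crc << 17)) + 0xa282ead8, i.e. the CRC
// rotated right by 15 bits plus the framing-format constant.
func maskedCRC(b []byte) uint32 {
	c := crc32.Update(0, crcTable, b)
	return ((c >> 15) | (c << 17)) + 0xa282ead8
}

func main() {
	fmt.Printf("%#08x\n", maskedCRC([]byte("sNaPpY")))
}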
+func (w *Writer) err(err error) error { + w.errMu.Lock() + errSet := w.errState + if errSet == nil && err != nil { + w.errState = err + errSet = err + } + w.errMu.Unlock() + return errSet +} + +// Reset discards the writer's state and switches the Snappy writer to write to w. +// This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + if !w.paramsOK { + return + } + // Close previous writer, if any. + if w.output != nil { + close(w.output) + w.writerWg.Wait() + w.output = nil + } + w.errState = nil + w.ibuf = w.ibuf[:0] + w.wroteStreamHeader = false + w.written = 0 + w.writer = writer + w.uncompWritten = 0 + w.index.reset(w.blockSize) + + // If we didn't get a writer, stop here. + if writer == nil { + return + } + // If no concurrency requested, don't spin up writer goroutine. + if w.concurrency == 1 { + return + } + + toWrite := make(chan chan result, w.concurrency) + w.output = toWrite + w.writerWg.Add(1) + + // Start a writer goroutine that will write all output in order. + go func() { + defer w.writerWg.Done() + + // Get a queued write. + for write := range toWrite { + // Wait for the data to be available. + input := <-write + in := input.b + if len(in) > 0 { + if w.err(nil) == nil { + // Don't expose data from previous buffers. + toWrite := in[:len(in):len(in)] + // Write to output. + n, err := writer.Write(toWrite) + if err == nil && n != len(toWrite) { + err = io.ErrShortBuffer + } + _ = w.err(err) + w.err(w.index.add(w.written, input.startOffset)) + w.written += int64(n) + } + } + if cap(in) >= w.obufLen { + w.buffers.Put(in) + } + // close the incoming write request. + // This can be used for synchronizing flushes. + close(write) + } + }() +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if err := w.err(nil); err != nil { + return 0, err + } + if w.flushOnWrite { + return w.write(p) + } + // If we exceed the input buffer size, start writing + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err(nil) == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + } + nRet += n + p = p[n:] + } + if err := w.err(nil); err != nil { + return nRet, err + } + // p should always be able to fit into w.ibuf now. + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +// ReadFrom implements the io.ReaderFrom interface. +// Using this is typically more efficient since it avoids a memory copy. +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. 
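ReadFrom, implemented next, lets the writer pull straight from a source without an intermediate buffer copy. A hedged usage sketch (the file paths are hypothetical):

package main

import (
	"fmt"
	"os"

	"github.com/klauspost/compress/s2"
)

func main() {
	src, err := os.Open("data.raw") // hypothetical input
	if err != nil {
		panic(err)
	}
	defer src.Close()
	dst, err := os.Create("data.s2")
	if err != nil {
		panic(err)
	}
	defer dst.Close()

	w := s2.NewWriter(dst)
	n, err := w.ReadFrom(src) // compresses until EOF
	if err != nil {
		panic(err)
	}
	// Close flushes remaining data and releases writer resources.
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("compressed %d input bytes\n", n)
}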
+func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+	if err := w.err(nil); err != nil {
+		return 0, err
+	}
+	if len(w.ibuf) > 0 {
+		err := w.Flush()
+		if err != nil {
+			return 0, err
+		}
+	}
+	if br, ok := r.(byter); ok {
+		buf := br.Bytes()
+		if err := w.EncodeBuffer(buf); err != nil {
+			return 0, err
+		}
+		return int64(len(buf)), w.Flush()
+	}
+	for {
+		inbuf := w.buffers.Get().([]byte)[:w.blockSize+obufHeaderLen]
+		n2, err := io.ReadFull(r, inbuf[obufHeaderLen:])
+		if err != nil {
+			if err == io.ErrUnexpectedEOF {
+				err = io.EOF
+			}
+			if err != io.EOF {
+				return n, w.err(err)
+			}
+		}
+		if n2 == 0 {
+			break
+		}
+		n += int64(n2)
+		err2 := w.writeFull(inbuf[:n2+obufHeaderLen])
+		if w.err(err2) != nil {
+			break
+		}
+
+		if err != nil {
+			// We got EOF and wrote everything
+			break
+		}
+	}
+
+	return n, w.err(nil)
+}
+
+// AddSkippableBlock will add a skippable block to the stream.
+// The ID must be 0x80-0xfe (inclusive).
+// Length of the skippable block must be <= 16777215 bytes.
+func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) {
+	if err := w.err(nil); err != nil {
+		return err
+	}
+	if len(data) == 0 {
+		return nil
+	}
+	if id < 0x80 || id > chunkTypePadding {
+		return fmt.Errorf("invalid skippable block id %x", id)
+	}
+	if len(data) > maxChunkSize {
+		return fmt.Errorf("skippable block exceeds maximum size")
+	}
+	var header [4]byte
+	// The chunk length field counts only the body that follows the header.
+	chunkLen := len(data)
+	header[0] = id
+	header[1] = uint8(chunkLen >> 0)
+	header[2] = uint8(chunkLen >> 8)
+	header[3] = uint8(chunkLen >> 16)
+	if w.concurrency == 1 {
+		write := func(b []byte) error {
+			n, err := w.writer.Write(b)
+			if err = w.err(err); err != nil {
+				return err
+			}
+			if n != len(b) {
+				return w.err(io.ErrShortWrite)
+			}
+			w.written += int64(n)
+			return w.err(nil)
+		}
+		if !w.wroteStreamHeader {
+			w.wroteStreamHeader = true
+			if w.snappy {
+				if err := write([]byte(magicChunkSnappy)); err != nil {
+					return err
+				}
+			} else {
+				if err := write([]byte(magicChunk)); err != nil {
+					return err
+				}
+			}
+		}
+		if err := write(header[:]); err != nil {
+			return err
+		}
+		if err := write(data); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	// Create output...
+	if !w.wroteStreamHeader {
+		w.wroteStreamHeader = true
+		hWriter := make(chan result)
+		w.output <- hWriter
+		if w.snappy {
+			hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)}
+		} else {
+			hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)}
+		}
+	}
+
+	// Copy input.
+	inbuf := w.buffers.Get().([]byte)[:4]
+	copy(inbuf, header[:])
+	inbuf = append(inbuf, data...)
+
+	output := make(chan result, 1)
+	// Queue output.
+	w.output <- output
+	output <- result{startOffset: w.uncompWritten, b: inbuf}
+
+	return nil
+}
+
+// EncodeBuffer will add a buffer to the stream.
+// This is the fastest way to encode a stream,
+// but the input buffer cannot be written to by the caller
+// until Flush or Close has been called when concurrency != 1.
+//
+// If you cannot control that, use the regular Write function.
+//
+// Note that input is not buffered.
+// This means that each write will result in discrete blocks being created.
+// For buffered writes, use the regular Write function.
+func (w *Writer) EncodeBuffer(buf []byte) (err error) {
+	if err := w.err(nil); err != nil {
+		return err
+	}
+
+	if w.flushOnWrite {
+		_, err := w.write(buf)
+		return err
+	}
+	// Flush queued data first.
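Because EncodeBuffer hands the supplied buffer to background encoders when concurrency != 1, callers must leave it untouched until Flush or Close returns. A sketch of that contract:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/s2"
)

func main() {
	var out bytes.Buffer
	w := s2.NewWriter(&out)

	buf := []byte("one discrete block per EncodeBuffer call")
	if err := w.EncodeBuffer(buf); err != nil {
		panic(err)
	}
	// buf must NOT be modified here: the writer may still be compressing it.
	if err := w.Close(); err != nil { // Close implies a flush
		panic(err)
	}
	// After Close the buffer is free to reuse.
	buf = buf[:0]
	fmt.Println(out.Len(), "compressed bytes written")
}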
+ if len(w.ibuf) > 0 { + err := w.Flush() + if err != nil { + return err + } + } + if w.concurrency == 1 { + _, err := w.writeSync(buf) + return err + } + + // Spawn goroutine and write block to output channel. + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)} + } + } + + for len(buf) > 0 { + // Cut input. + uncompressed := buf + if len(uncompressed) > w.blockSize { + uncompressed = uncompressed[:w.blockSize] + } + buf = buf[len(uncompressed):] + // Get an output buffer. + obuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen] + output := make(chan result) + // Queue output now, so we keep order. + w.output <- output + res := result{ + startOffset: w.uncompWritten, + } + w.uncompWritten += int64(len(uncompressed)) + go func() { + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // copy uncompressed + copy(obuf[obufHeaderLen:], uncompressed) + } + + // Fill in the per-chunk header that comes before the body. + obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + // Queue final output. + res.b = obuf + output <- res + }() + } + return nil +} + +func (w *Writer) encodeBlock(obuf, uncompressed []byte) int { + if w.customEnc != nil { + if ret := w.customEnc(obuf, uncompressed); ret >= 0 { + return ret + } + } + if w.snappy { + switch w.level { + case levelFast: + return encodeBlockSnappy(obuf, uncompressed) + case levelBetter: + return encodeBlockBetterSnappy(obuf, uncompressed) + case levelBest: + return encodeBlockBestSnappy(obuf, uncompressed) + } + return 0 + } + switch w.level { + case levelFast: + return encodeBlock(obuf, uncompressed) + case levelBetter: + return encodeBlockBetter(obuf, uncompressed) + case levelBest: + return encodeBlockBest(obuf, uncompressed, nil) + } + return 0 +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if err := w.err(nil); err != nil { + return 0, err + } + if w.concurrency == 1 { + return w.writeSync(p) + } + + // Spawn goroutine and write block to output channel. + for len(p) > 0 { + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)} + } + } + + var uncompressed []byte + if len(p) > w.blockSize { + uncompressed, p = p[:w.blockSize], p[w.blockSize:] + } else { + uncompressed, p = p, nil + } + + // Copy input. + // If the block is incompressible, this is used for the result. 
+ inbuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen] + obuf := w.buffers.Get().([]byte)[:w.obufLen] + copy(inbuf[obufHeaderLen:], uncompressed) + uncompressed = inbuf[obufHeaderLen:] + + output := make(chan result) + // Queue output now, so we keep order. + w.output <- output + res := result{ + startOffset: w.uncompWritten, + } + w.uncompWritten += int64(len(uncompressed)) + + go func() { + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // Use input as output. + obuf, inbuf = inbuf, obuf + } + + // Fill in the per-chunk header that comes before the body. + obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + // Queue final output. + res.b = obuf + output <- res + + // Put unused buffer back in pool. + w.buffers.Put(inbuf) + }() + nRet += len(uncompressed) + } + return nRet, nil +} + +// writeFull is a special version of write that will always write the full buffer. +// Data to be compressed should start at offset obufHeaderLen and fill the remainder of the buffer. +// The data will be written as a single block. +// The caller is not allowed to use inbuf after this function has been called. +func (w *Writer) writeFull(inbuf []byte) (errRet error) { + if err := w.err(nil); err != nil { + return err + } + + if w.concurrency == 1 { + _, err := w.writeSync(inbuf[obufHeaderLen:]) + return err + } + + // Spawn goroutine and write block to output channel. + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + hWriter := make(chan result) + w.output <- hWriter + if w.snappy { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunkSnappy)} + } else { + hWriter <- result{startOffset: w.uncompWritten, b: []byte(magicChunk)} + } + } + + // Get an output buffer. + obuf := w.buffers.Get().([]byte)[:w.obufLen] + uncompressed := inbuf[obufHeaderLen:] + + output := make(chan result) + // Queue output now, so we keep order. + w.output <- output + res := result{ + startOffset: w.uncompWritten, + } + w.uncompWritten += int64(len(uncompressed)) + + go func() { + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + // Check if we should use this, or store as uncompressed instead. + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + // Use input as output. + obuf, inbuf = inbuf, obuf + } + + // Fill in the per-chunk header that comes before the body. 
+ obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + // Queue final output. + res.b = obuf + output <- res + + // Put unused buffer back in pool. + w.buffers.Put(inbuf) + }() + return nil +} + +func (w *Writer) writeSync(p []byte) (nRet int, errRet error) { + if err := w.err(nil); err != nil { + return 0, err + } + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + var n int + var err error + if w.snappy { + n, err = w.writer.Write([]byte(magicChunkSnappy)) + } else { + n, err = w.writer.Write([]byte(magicChunk)) + } + if err != nil { + return 0, w.err(err) + } + if n != len(magicChunk) { + return 0, w.err(io.ErrShortWrite) + } + w.written += int64(n) + } + + for len(p) > 0 { + var uncompressed []byte + if len(p) > w.blockSize { + uncompressed, p = p[:w.blockSize], p[w.blockSize:] + } else { + uncompressed, p = p, nil + } + + obuf := w.buffers.Get().([]byte)[:w.obufLen] + checksum := crc(uncompressed) + + // Set to uncompressed. + chunkType := uint8(chunkTypeUncompressedData) + chunkLen := 4 + len(uncompressed) + + // Attempt compressing. + n := binary.PutUvarint(obuf[obufHeaderLen:], uint64(len(uncompressed))) + n2 := w.encodeBlock(obuf[obufHeaderLen+n:], uncompressed) + + if n2 > 0 { + chunkType = uint8(chunkTypeCompressedData) + chunkLen = 4 + n + n2 + obuf = obuf[:obufHeaderLen+n+n2] + } else { + obuf = obuf[:8] + } + + // Fill in the per-chunk header that comes before the body. + obuf[0] = chunkType + obuf[1] = uint8(chunkLen >> 0) + obuf[2] = uint8(chunkLen >> 8) + obuf[3] = uint8(chunkLen >> 16) + obuf[4] = uint8(checksum >> 0) + obuf[5] = uint8(checksum >> 8) + obuf[6] = uint8(checksum >> 16) + obuf[7] = uint8(checksum >> 24) + + n, err := w.writer.Write(obuf) + if err != nil { + return 0, w.err(err) + } + if n != len(obuf) { + return 0, w.err(io.ErrShortWrite) + } + w.err(w.index.add(w.written, w.uncompWritten)) + w.written += int64(n) + w.uncompWritten += int64(len(uncompressed)) + + if chunkType == chunkTypeUncompressedData { + // Write uncompressed data. + n, err := w.writer.Write(uncompressed) + if err != nil { + return 0, w.err(err) + } + if n != len(uncompressed) { + return 0, w.err(io.ErrShortWrite) + } + w.written += int64(n) + } + w.buffers.Put(obuf) + // Queue final output. + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +// This does not apply padding. +func (w *Writer) Flush() error { + if err := w.err(nil); err != nil { + return err + } + + // Queue any data still in input buffer. + if len(w.ibuf) != 0 { + if !w.wroteStreamHeader { + _, err := w.writeSync(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err(err) + } else { + _, err := w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + err = w.err(err) + if err != nil { + return err + } + } + } + if w.output == nil { + return w.err(nil) + } + + // Send empty buffer + res := make(chan result) + w.output <- res + // Block until this has been picked up. + res <- result{b: nil, startOffset: w.uncompWritten} + // When it is closed, we have flushed. + <-res + return w.err(nil) +} + +// Close calls Flush and then closes the Writer. +// Calling Close multiple times is ok, +// but calling CloseIndex after this will make it not return the index. 
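+// Close blocks until all queued blocks have been compressed and written,
+// then writes any configured padding and, if requested, appends the index.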
+func (w *Writer) Close() error {
+	_, err := w.closeIndex(w.appendIndex)
+	return err
+}
+
+// CloseIndex calls Close and returns an index on first call.
+// This is not required if you are only adding an index to a stream.
+func (w *Writer) CloseIndex() ([]byte, error) {
+	return w.closeIndex(true)
+}
+
+func (w *Writer) closeIndex(idx bool) ([]byte, error) {
+	err := w.Flush()
+	if w.output != nil {
+		close(w.output)
+		w.writerWg.Wait()
+		w.output = nil
+	}
+
+	var index []byte
+	if w.err(err) == nil && w.writer != nil {
+		// Create index.
+		if idx {
+			compSize := int64(-1)
+			if w.pad <= 1 {
+				compSize = w.written
+			}
+			index = w.index.appendTo(w.ibuf[:0], w.uncompWritten, compSize)
+			// Count as written for padding.
+			if w.appendIndex {
+				w.written += int64(len(index))
+			}
+		}
+
+		if w.pad > 1 {
+			tmp := w.ibuf[:0]
+			if len(index) > 0 {
+				// Allocate another buffer.
+				tmp = w.buffers.Get().([]byte)[:0]
+				defer w.buffers.Put(tmp)
+			}
+			add := calcSkippableFrame(w.written, int64(w.pad))
+			frame, err := skippableFrame(tmp, add, w.randSrc)
+			if err = w.err(err); err != nil {
+				return nil, err
+			}
+			n, err2 := w.writer.Write(frame)
+			if err2 == nil && n != len(frame) {
+				err2 = io.ErrShortWrite
+			}
+			_ = w.err(err2)
+		}
+		if len(index) > 0 && w.appendIndex {
+			n, err2 := w.writer.Write(index)
+			if err2 == nil && n != len(index) {
+				err2 = io.ErrShortWrite
+			}
+			_ = w.err(err2)
+		}
+	}
+	err = w.err(errClosed)
+	if err == errClosed {
+		return index, nil
+	}
+	return nil, err
+}
+
+// calcSkippableFrame will return a total size to be added to written
+// so that it becomes divisible by wantMultiple.
+// The value will always be 0 or at least skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+	if wantMultiple <= 0 {
+		panic("wantMultiple <= 0")
+	}
+	if written < 0 {
+		panic("written < 0")
+	}
+	leftOver := written % wantMultiple
+	if leftOver == 0 {
+		return 0
+	}
+	toAdd := wantMultiple - leftOver
+	for toAdd < skippableFrameHeader {
+		toAdd += wantMultiple
+	}
+	return int(toAdd)
+}
+
+// skippableFrame will add a skippable frame with a total size of 'total' bytes.
+// total should be >= skippableFrameHeader and < maxBlockSize + skippableFrameHeader
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+	if total == 0 {
+		return dst, nil
+	}
+	if total < skippableFrameHeader {
+		return dst, fmt.Errorf("s2: requested skippable frame (%d) < 4", total)
+	}
+	if int64(total) >= maxBlockSize+skippableFrameHeader {
+		return dst, fmt.Errorf("s2: requested skippable frame (%d) >= max 1<<24", total)
+	}
+	// Chunk type 0xfe "Section 4.4 Padding (chunk type 0xfe)"
+	dst = append(dst, chunkTypePadding)
+	f := uint32(total - skippableFrameHeader)
+	// Add chunk length.
+	dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16))
+	// Add data
+	start := len(dst)
+	dst = append(dst, make([]byte, f)...)
+	_, err := io.ReadFull(r, dst[start:])
+	return dst, err
+}
+
+var errClosed = errors.New("s2: Writer is closed")
+
+// WriterOption is an option for creating an encoder.
+type WriterOption func(*Writer) error
+
+// WriterConcurrency will set the concurrency,
+// meaning the maximum number of encoders to run concurrently.
+// The value supplied must be at least 1.
+// By default this will be set to GOMAXPROCS.
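+// A concurrency of 1 makes all block writes synchronous (see writeSync);
+// combined with WriterFlushOnWrite this ensures output has been forwarded
+// when Write returns.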
+func WriterConcurrency(n int) WriterOption {
+	return func(w *Writer) error {
+		if n <= 0 {
+			return errors.New("concurrency must be at least 1")
+		}
+		w.concurrency = n
+		return nil
+	}
+}
+
+// WriterAddIndex will append an index to the end of a stream
+// when it is closed.
+func WriterAddIndex() WriterOption {
+	return func(w *Writer) error {
+		w.appendIndex = true
+		return nil
+	}
+}
+
+// WriterBetterCompression will enable better compression.
+// EncodeBetter compresses better than Encode but typically with a
+// 10-40% speed decrease on both compression and decompression.
+func WriterBetterCompression() WriterOption {
+	return func(w *Writer) error {
+		w.level = levelBetter
+		return nil
+	}
+}
+
+// WriterBestCompression will enable best compression.
+// EncodeBest compresses better than EncodeBetter but typically with a
+// big speed decrease on compression.
+func WriterBestCompression() WriterOption {
+	return func(w *Writer) error {
+		w.level = levelBest
+		return nil
+	}
+}
+
+// WriterUncompressed will bypass compression.
+// The stream will be written as uncompressed blocks only.
+// If concurrency is > 1, CRC and output will still be done asynchronously.
+func WriterUncompressed() WriterOption {
+	return func(w *Writer) error {
+		w.level = levelUncompressed
+		return nil
+	}
+}
+
+// WriterBlockSize allows overriding the default block size.
+// Blocks will be this size or smaller.
+// Minimum size is 4KB and maximum size is 4MB.
+//
+// Bigger blocks may give bigger throughput on systems with many cores,
+// and will increase compression slightly, but it will limit the possible
+// concurrency for smaller payloads for both encoding and decoding.
+// Default block size is 1MB.
+//
+// When writing Snappy compatible output using WriterSnappyCompat,
+// the maximum block size is 64KB.
+func WriterBlockSize(n int) WriterOption {
+	return func(w *Writer) error {
+		if w.snappy && n > maxSnappyBlockSize || n < minBlockSize {
+			return errors.New("s2: invalid block size. Must be <= 64K and >= 4KB for snappy compatible output")
+		}
+		if n > maxBlockSize || n < minBlockSize {
+			return errors.New("s2: invalid block size. Must be <= 4MB and >= 4KB")
+		}
+		w.blockSize = n
+		return nil
+	}
+}
+
+// WriterPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible to the decoder.
+// n must be > 0 and <= 4MB.
+// The padded area will be filled with data from crypto/rand.Reader.
+// The padding will be applied whenever Close is called on the writer.
+func WriterPadding(n int) WriterOption {
+	return func(w *Writer) error {
+		if n <= 0 {
+			return fmt.Errorf("s2: padding must be at least 1")
+		}
+		// No need to waste our time.
+		if n == 1 {
+			w.pad = 0
+			return nil
+		}
+		if n > maxBlockSize {
+			return fmt.Errorf("s2: padding must be less than 4MB")
+		}
+		w.pad = n
+		return nil
+	}
+}
+
+// WriterPaddingSrc will get random data for padding from the supplied source.
+// By default crypto/rand is used.
+func WriterPaddingSrc(reader io.Reader) WriterOption {
+	return func(w *Writer) error {
+		w.randSrc = reader
+		return nil
+	}
+}
+
+// WriterSnappyCompat will write snappy compatible output.
+// The output can be decompressed using either snappy or s2.
+// If the block size is larger than 64KB it is reduced to fit.
+func WriterSnappyCompat() WriterOption {
+	return func(w *Writer) error {
+		w.snappy = true
+		if w.blockSize > 64<<10 {
+			// We choose 8 bytes less than 64K, since that will make literal emits
+			// slightly more effective, and allows us to skip some size checks.
+			w.blockSize = (64 << 10) - 8
+		}
+		return nil
+	}
+}
+
+// WriterFlushOnWrite will compress blocks on each call to the Write function.
+//
+// This is quite inefficient as block size will depend on the write size.
+//
+// Use WriterConcurrency(1) to also make sure that output is flushed when
+// Write calls return; otherwise output will be written when compression is done.
+func WriterFlushOnWrite() WriterOption {
+	return func(w *Writer) error {
+		w.flushOnWrite = true
+		return nil
+	}
+}
+
+// WriterCustomEncoder allows overriding the encoder for blocks on the stream.
+// The function must compress 'src' into 'dst' and return the bytes used in dst as an integer.
+// The block size (initial varint) should not be added by the encoder.
+// Returning value 0 indicates the block could not be compressed.
+// Returning a negative value indicates that the default compression should be attempted instead.
+// The function should expect to be called concurrently.
+func WriterCustomEncoder(fn func(dst, src []byte) int) WriterOption {
+	return func(w *Writer) error {
+		w.customEnc = fn
+		return nil
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/snappy/.gitignore b/vendor/github.com/klauspost/compress/snappy/.gitignore
new file mode 100644
index 00000000000..042091d9b3b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/klauspost/compress/snappy/AUTHORS b/vendor/github.com/klauspost/compress/snappy/AUTHORS
new file mode 100644
index 00000000000..52ccb5a934d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/AUTHORS
@@ -0,0 +1,18 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Amazon.com, Inc
+Damian Gryski
+Eric Buth
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Klaus Post
+Rodolfo Carvalho
+Sebastien Binet
diff --git a/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
new file mode 100644
index 00000000000..ea6524ddd02
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/snappy/CONTRIBUTORS
@@ -0,0 +1,41 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Alex Legg +Damian Gryski +Eric Buth +Jan Mercl <0xjnml@gmail.com> +Jonathan Swinney +Kai Backman +Klaus Post +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/klauspost/compress/snappy/LICENSE b/vendor/github.com/klauspost/compress/snappy/LICENSE new file mode 100644 index 00000000000..6050c10f4c8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/compress/snappy/README.md b/vendor/github.com/klauspost/compress/snappy/README.md new file mode 100644 index 00000000000..8271bbd0903 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/README.md @@ -0,0 +1,17 @@ +# snappy + +The Snappy compression format in the Go programming language. + +This is a drop-in replacement for `github.com/golang/snappy`. + +It provides a full, compatible replacement of the Snappy package by simply changing imports. + +See [Snappy Compatibility](https://github.com/klauspost/compress/tree/master/s2#snappy-compatibility) in the S2 documentation. + +"Better" compression mode is used. For buffered streams concurrent compression is used. + +For more options use the [s2 package](https://pkg.go.dev/github.com/klauspost/compress/s2). 
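+
+A minimal block-format round trip, as an illustrative sketch:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/klauspost/compress/snappy"
+)
+
+func main() {
+	// Encode compresses using the Snappy block format; a nil dst allocates as needed.
+	encoded := snappy.Encode(nil, []byte("hello, snappy"))
+
+	// Decode reverses it; the output is the original input.
+	decoded, err := snappy.Decode(nil, encoded)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(decoded))
+}
+```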
+ +# usage + +Replace imports `github.com/golang/snappy` with `github.com/klauspost/compress/snappy`. diff --git a/vendor/github.com/klauspost/compress/snappy/decode.go b/vendor/github.com/klauspost/compress/snappy/decode.go new file mode 100644 index 00000000000..89f1fa23444 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/decode.go @@ -0,0 +1,60 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "io" + + "github.com/klauspost/compress/s2" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = s2.ErrCorrupt + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = s2.ErrTooLarge + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = s2.ErrUnsupported +) + +const ( + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + return s2.DecodedLen(src) +} + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + return s2.Decode(dst, src) +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return s2.NewReader(r, s2.ReaderMaxBlockSize(maxBlockSize)) +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader = s2.Reader diff --git a/vendor/github.com/klauspost/compress/snappy/encode.go b/vendor/github.com/klauspost/compress/snappy/encode.go new file mode 100644 index 00000000000..e8bd72c1864 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/encode.go @@ -0,0 +1,59 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "io" + + "github.com/klauspost/compress/s2" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. 
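+//
+// The output is produced with s2's EncodeSnappyBetter (see below), so it may
+// differ byte-for-byte from github.com/golang/snappy output while remaining
+// decodable by any Snappy decoder.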
+func Encode(dst, src []byte) []byte { + return s2.EncodeSnappyBetter(dst, src) +} + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + return s2.MaxEncodedLen(srcLen) +} + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return s2.NewWriter(w, s2.WriterSnappyCompat(), s2.WriterBetterCompression(), s2.WriterFlushOnWrite(), s2.WriterConcurrency(1)) +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return s2.NewWriter(w, s2.WriterSnappyCompat(), s2.WriterBetterCompression()) +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer = s2.Writer diff --git a/vendor/github.com/klauspost/compress/snappy/snappy.go b/vendor/github.com/klauspost/compress/snappy/snappy.go new file mode 100644 index 00000000000..398cdc95a01 --- /dev/null +++ b/vendor/github.com/klauspost/compress/snappy/snappy.go @@ -0,0 +1,46 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. 
The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ diff --git a/vendor/github.com/segmentio/kafka-go/.gitattributes b/vendor/github.com/segmentio/kafka-go/.gitattributes new file mode 100644 index 00000000000..0cf33618e25 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/.gitattributes @@ -0,0 +1 @@ +fixtures/*.hex binary diff --git a/vendor/github.com/segmentio/kafka-go/.gitignore b/vendor/github.com/segmentio/kafka-go/.gitignore new file mode 100644 index 00000000000..f8b4085e35b --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/.gitignore @@ -0,0 +1,40 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +/kafkacli + +# Emacs +*~ + +# VIM +*.swp + +# Goland +.idea + +#IntelliJ +*.iml + +# govendor +/vendor/*/ diff --git a/vendor/github.com/segmentio/kafka-go/.golangci.yml b/vendor/github.com/segmentio/kafka-go/.golangci.yml new file mode 100644 index 00000000000..040910ab6ff --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/.golangci.yml @@ -0,0 +1,18 @@ +linters: + enable: + - bodyclose + - errorlint + - goconst + - godot + - gofmt + - goimports + - prealloc + + disable: + # Temporarily disabling so it can be addressed in a dedicated PR. + - errcheck + - goerr113 + +linters-settings: + goconst: + ignore-tests: true diff --git a/vendor/github.com/segmentio/kafka-go/CODE_OF_CONDUCT.md b/vendor/github.com/segmentio/kafka-go/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..359d39b9df3 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +- The use of sexualized language or imagery and unwelcome sexual attention or + advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or electronic + address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +Project maintainers are available at [#kafka-go](https://gophers.slack.com/archives/CG4H0N9PX) channel inside the [Gophers Slack](https://gophers.slack.com) + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at open-source@twilio.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org diff --git a/vendor/github.com/segmentio/kafka-go/CONTRIBUTING.md b/vendor/github.com/segmentio/kafka-go/CONTRIBUTING.md new file mode 100644 index 00000000000..a52ad6e916b --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/CONTRIBUTING.md @@ -0,0 +1,139 @@ +# Contributing to kafka-go + +kafka-go is an open source project. We welcome contributions to kafka-go of any kind including documentation, +organization, tutorials, bug reports, issues, feature requests, feature implementations, pull requests, etc. 
+
+## Table of Contents
+
+* [Reporting Issues](#reporting-issues)
+* [Submitting Patches](#submitting-patches)
+  * [Code Contribution Guidelines](#code-contribution-guidelines)
+  * [Git Commit Message Guidelines](#git-commit-message-guidelines)
+  * [Fetching the Sources From GitHub](#fetching-the-sources-from-github)
+  * [Building kafka-go with Your Changes](#building-kafka-go-with-your-changes)
+
+## Reporting Issues
+
+If you believe you have found a defect in kafka-go, use the GitHub issue tracker to report
+the problem to the maintainers.
+When reporting the issue, please provide the version of kafka-go, which version(s) of Kafka
+you are testing against, and your operating system.
+
+ - [kafka-go Issues · segmentio/kafka-go](https://github.com/segmentio/kafka-go/issues)
+
+## Submitting Patches
+
+The kafka-go project welcomes all contributors and contributions regardless of skill or experience level. If you are
+interested in helping with the project, we will help you with your contribution.
+
+### Code Contribution
+
+To make contributions as seamless as possible, we ask the following:
+
+* Go ahead and fork the project and make your changes. We encourage pull requests to allow for review and discussion of code changes.
+* When you’re ready to create a pull request, be sure to:
+  * Have test cases for the new code. If you have questions about how to do this, please ask in your pull request.
+  * Run `go fmt`.
+  * Squash your commits into a single commit with `git rebase -i`. It’s okay to force update your pull request with `git push -f`.
+  * Follow the **Git Commit Message Guidelines** below.
+
+### Git Commit Message Guidelines
+
+This [blog article](http://chris.beams.io/posts/git-commit/) is a good resource for learning how to write good commit messages,
+the most important part being that each commit message should have a title/subject in imperative mood starting with a capital letter and no trailing period:
+*"Return error on wrong use of the Reader"*, **NOT** *"returning some error."*
+
+Also, if your commit references one or more GitHub issues, always end your commit message body with *See #1234* or *Fixes #1234*.
+Replace *1234* with the GitHub issue ID. The last example will close the issue when the commit is merged into *master*.
+
+Please use a short and descriptive branch name, e.g. NOT "patch-1". It's very common but creates a naming conflict
+each time a submission is pulled for review.
+
+An example:
+
+```text
+Add Code of Conduct and Code Contribution Guidelines
+
+Add a full Code of Conduct and Code Contribution Guidelines document.
+Provide description on how best to retrieve code, fork, checkout, and commit changes.
+
+Fixes #688
+```
+
+### Fetching the Sources From GitHub
+
+We use Go Modules support built into Go 1.11 to build. The easiest way is to clone kafka-go into a directory outside of
+`GOPATH`, as in the following example:
+
+```bash
+mkdir $HOME/src
+cd $HOME/src
+git clone https://github.com/segmentio/kafka-go.git
+cd kafka-go
+go build ./...
+```
+
+To make changes to kafka-go's source:
+
+1. Create a new branch for your changes (the branch name is arbitrary):
+
+   ```bash
+   git checkout -b branch1234
+   ```
+
+1. After making your changes, commit them to your new branch:
+
+   ```bash
+   git commit -a -v
+   ```
+
+1. Fork kafka-go on GitHub.
+
+1. Add your fork as a new remote (the remote name, "upstream" in this example, is arbitrary):
+
+   ```bash
+   git remote add upstream git@github.com:USERNAME/kafka-go.git
+   ```
+
+1. Push your branch (the remote name, "upstream" in this example, is arbitrary):
+
+   ```bash
+   git push upstream
+   ```
+
+1. You are now ready to submit a PR based upon the new branch in your forked repository.
+
+### Using the forked library
+
+Replacing the original kafka-go library with a forked version is accomplished as follows.
+
+1. Make sure your application already has a go.mod entry depending on kafka-go:
+
+   ```text
+   module github.com/myusername/myapp
+
+   require (
+     ...
+     github.com/segmentio/kafka-go v1.2.3
+     ...
+   )
+   ```
+
+1. Add the following `replace` entry to the beginning of the go.mod file:
+
+   ```text
+   module github.com/myusername/myapp
+
+   replace github.com/segmentio/kafka-go v1.2.3 => ../local/directory
+
+   require (
+     ...
+     github.com/segmentio/kafka-go v1.2.3
+     ...
+   )
+   ```
+
+1. Depending on whether you are vendoring dependencies, you may need to run the following command to pull in the changes.
+
+   ```bash
+   > go mod vendor
+   ```
diff --git a/vendor/github.com/segmentio/kafka-go/LICENSE b/vendor/github.com/segmentio/kafka-go/LICENSE
new file mode 100644
index 00000000000..09e136c5100
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 Segment
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/segmentio/kafka-go/Makefile b/vendor/github.com/segmentio/kafka-go/Makefile
new file mode 100644
index 00000000000..e2374f2e3d4
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/Makefile
@@ -0,0 +1,7 @@
+test:
+	KAFKA_SKIP_NETTEST=1 \
+	KAFKA_VERSION=2.3.1 \
+	go test -race -cover ./...
+
+docker:
+	docker-compose up -d
diff --git a/vendor/github.com/segmentio/kafka-go/README.md b/vendor/github.com/segmentio/kafka-go/README.md
new file mode 100644
index 00000000000..304c1603be0
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/README.md
@@ -0,0 +1,799 @@
+# kafka-go [![CircleCI](https://circleci.com/gh/segmentio/kafka-go.svg?style=shield)](https://circleci.com/gh/segmentio/kafka-go) [![Go Report Card](https://goreportcard.com/badge/github.com/segmentio/kafka-go)](https://goreportcard.com/report/github.com/segmentio/kafka-go) [![GoDoc](https://godoc.org/github.com/segmentio/kafka-go?status.svg)](https://godoc.org/github.com/segmentio/kafka-go)
+
+## Motivations
+
+We rely on both Go and Kafka a lot at Segment.
Unfortunately, the state of the Go
+client libraries for Kafka at the time of this writing was not ideal. The available
+options were:
+
+- [sarama](https://github.com/Shopify/sarama), which is by far the most popular
+but is quite difficult to work with. It is poorly documented, the API exposes
+low-level concepts of the Kafka protocol, and it doesn't support recent Go features
+like [contexts](https://golang.org/pkg/context/). It also passes all values as
+pointers, which causes large numbers of dynamic memory allocations, more frequent
+garbage collections, and higher memory usage.
+
+- [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go) is a
+cgo-based wrapper around [librdkafka](https://github.com/edenhill/librdkafka),
+which means it introduces a dependency on a C library on all Go code that uses
+the package. It has much better documentation than sarama but still lacks support
+for Go contexts.
+
+- [goka](https://github.com/lovoo/goka) is a more recent Kafka client for Go
+which focuses on a specific usage pattern. It provides abstractions for using Kafka
+as a message-passing bus between services rather than an ordered log of events, but
+this is not the typical use case of Kafka for us at Segment. The package also
+depends on sarama for all interactions with Kafka.
+
+This is where `kafka-go` comes into play. It provides both low- and high-level
+APIs for interacting with Kafka, mirroring concepts and implementing interfaces of
+the Go standard library to make it easy to use and integrate with existing
+software.
+
+#### Note:
+
+In order to better align with our newly adopted Code of Conduct, the kafka-go
+project has renamed our default branch to `main`. For the full details of our
+Code Of Conduct see [this](./CODE_OF_CONDUCT.md) document.
+
+## Kafka versions
+
+`kafka-go` is currently tested with Kafka versions 0.10.1.0 to 2.7.1.
+While it should also be compatible with later versions, newer features available
+in the Kafka API may not yet be implemented in the client.
+
+## Go versions
+
+`kafka-go` requires Go version 1.15 or later.
+
+## Connection [![GoDoc](https://godoc.org/github.com/segmentio/kafka-go?status.svg)](https://godoc.org/github.com/segmentio/kafka-go#Conn)
+
+The `Conn` type is the core of the `kafka-go` package. It wraps around a raw
+network connection to expose a low-level API to a Kafka server.
+
+Here are some examples showing typical use of a connection object:
+```go
+// to produce messages
+topic := "my-topic"
+partition := 0
+
+conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", topic, partition)
+if err != nil {
+	log.Fatal("failed to dial leader:", err)
+}
+
+conn.SetWriteDeadline(time.Now().Add(10*time.Second))
+_, err = conn.WriteMessages(
+	kafka.Message{Value: []byte("one!")},
+	kafka.Message{Value: []byte("two!")},
+	kafka.Message{Value: []byte("three!")},
+)
+if err != nil {
+	log.Fatal("failed to write messages:", err)
+}
+
+if err := conn.Close(); err != nil {
+	log.Fatal("failed to close writer:", err)
+}
+```
+```go
+// to consume messages
+topic := "my-topic"
+partition := 0
+
+conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", topic, partition)
+if err != nil {
+	log.Fatal("failed to dial leader:", err)
+}
+
+conn.SetReadDeadline(time.Now().Add(10*time.Second))
+batch := conn.ReadBatch(10e3, 1e6) // fetch 10KB min, 1MB max
+
+b := make([]byte, 10e3) // 10KB max per message
+for {
+	n, err := batch.Read(b)
+	if err != nil {
+		break
+	}
+	fmt.Println(string(b[:n]))
+}
+
+if err := batch.Close(); err != nil {
+	log.Fatal("failed to close batch:", err)
+}
+
+if err := conn.Close(); err != nil {
+	log.Fatal("failed to close connection:", err)
+}
+```
+
+### To Create Topics
+By default, Kafka has `auto.create.topics.enable='true'` (`KAFKA_AUTO_CREATE_TOPICS_ENABLE='true'` in the wurstmeister/kafka Docker image). If this value is set to `'true'`, topics will be created as a side effect of `kafka.DialLeader`, like so:
+```go
+// to create topics when auto.create.topics.enable='true'
+conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", "my-topic", 0)
+if err != nil {
+	panic(err.Error())
+}
+```
+
+If `auto.create.topics.enable='false'` then you will need to create topics explicitly, like so:
+```go
+// to create topics when auto.create.topics.enable='false'
+topic := "my-topic"
+
+conn, err := kafka.Dial("tcp", "localhost:9092")
+if err != nil {
+	panic(err.Error())
+}
+defer conn.Close()
+
+controller, err := conn.Controller()
+if err != nil {
+	panic(err.Error())
+}
+var controllerConn *kafka.Conn
+controllerConn, err = kafka.Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port)))
+if err != nil {
+	panic(err.Error())
+}
+defer controllerConn.Close()
+
+topicConfigs := []kafka.TopicConfig{
+	{
+		Topic:             topic,
+		NumPartitions:     1,
+		ReplicationFactor: 1,
+	},
+}
+
+err = controllerConn.CreateTopics(topicConfigs...)
+if err != nil { + panic(err.Error()) +} +``` + +### To Connect To Leader Via a Non-leader Connection +```go +// to connect to the kafka leader via an existing non-leader connection rather than using DialLeader +conn, err := kafka.Dial("tcp", "localhost:9092") +if err != nil { + panic(err.Error()) +} +defer conn.Close() +controller, err := conn.Controller() +if err != nil { + panic(err.Error()) +} +var connLeader *kafka.Conn +connLeader, err = kafka.Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port))) +if err != nil { + panic(err.Error()) +} +defer connLeader.Close() +``` + +### To list topics +```go +conn, err := kafka.Dial("tcp", "localhost:9092") +if err != nil { + panic(err.Error()) +} +defer conn.Close() + +partitions, err := conn.ReadPartitions() +if err != nil { + panic(err.Error()) +} + +m := map[string]struct{}{} + +for _, p := range partitions { + m[p.Topic] = struct{}{} +} +for k := range m { + fmt.Println(k) +} +``` + + +Because it is low level, the `Conn` type turns out to be a great building block +for higher level abstractions, like the `Reader` for example. + +## Reader [![GoDoc](https://godoc.org/github.com/segmentio/kafka-go?status.svg)](https://godoc.org/github.com/segmentio/kafka-go#Reader) + +A `Reader` is another concept exposed by the `kafka-go` package, which intends +to make it simpler to implement the typical use case of consuming from a single +topic-partition pair. +A `Reader` also automatically handles reconnections and offset management, and +exposes an API that supports asynchronous cancellations and timeouts using Go +contexts. + +Note that it is important to call `Close()` on a `Reader` when a process exits. +The kafka server needs a graceful disconnect to stop it from continuing to +attempt to send messages to the connected clients. The given example will not +call `Close()` if the process is terminated with SIGINT (ctrl-c at the shell) or +SIGTERM (as docker stop or a kubernetes restart does). This can result in a +delay when a new reader on the same topic connects (e.g. new process started +or new container running). Use a `signal.Notify` handler to close the reader on +process shutdown. + +```go +// make a new reader that consumes from topic-A, partition 0, at offset 42 +r := kafka.NewReader(kafka.ReaderConfig{ + Brokers: []string{"localhost:9092","localhost:9093", "localhost:9094"}, + Topic: "topic-A", + Partition: 0, + MaxBytes: 10e6, // 10MB +}) +r.SetOffset(42) + +for { + m, err := r.ReadMessage(context.Background()) + if err != nil { + break + } + fmt.Printf("message at offset %d: %s = %s\n", m.Offset, string(m.Key), string(m.Value)) +} + +if err := r.Close(); err != nil { + log.Fatal("failed to close reader:", err) +} +``` + +### Consumer Groups + +```kafka-go``` also supports Kafka consumer groups including broker managed offsets. +To enable consumer groups, simply specify the GroupID in the ReaderConfig. + +ReadMessage automatically commits offsets when using consumer groups. 
+
+```go
+// make a new reader that consumes from topic-A
+r := kafka.NewReader(kafka.ReaderConfig{
+	Brokers:  []string{"localhost:9092", "localhost:9093", "localhost:9094"},
+	GroupID:  "consumer-group-id",
+	Topic:    "topic-A",
+	MaxBytes: 10e6, // 10MB
+})
+
+for {
+	m, err := r.ReadMessage(context.Background())
+	if err != nil {
+		break
+	}
+	fmt.Printf("message at topic/partition/offset %v/%v/%v: %s = %s\n", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value))
+}
+
+if err := r.Close(); err != nil {
+	log.Fatal("failed to close reader:", err)
+}
+```
+
+There are a number of limitations when using consumer groups:
+
+* ```(*Reader).SetOffset``` will return an error when GroupID is set
+* ```(*Reader).Offset``` will always return ```-1``` when GroupID is set
+* ```(*Reader).Lag``` will always return ```-1``` when GroupID is set
+* ```(*Reader).ReadLag``` will return an error when GroupID is set
+* ```(*Reader).Stats``` will return a partition of ```-1``` when GroupID is set
+
+### Explicit Commits
+
+```kafka-go``` also supports explicit commits. Instead of calling ```ReadMessage```,
+call ```FetchMessage``` followed by ```CommitMessages```.
+
+```go
+ctx := context.Background()
+for {
+	m, err := r.FetchMessage(ctx)
+	if err != nil {
+		break
+	}
+	fmt.Printf("message at topic/partition/offset %v/%v/%v: %s = %s\n", m.Topic, m.Partition, m.Offset, string(m.Key), string(m.Value))
+	if err := r.CommitMessages(ctx, m); err != nil {
+		log.Fatal("failed to commit messages:", err)
+	}
+}
+```
+
+When committing messages in consumer groups, the message with the highest offset
+for a given topic/partition determines the value of the committed offset for
+that partition. For example, if messages at offset 1, 2, and 3 of a single
+partition were retrieved by calls to `FetchMessage`, calling `CommitMessages`
+with message offset 3 will also result in committing the messages at offsets 1
+and 2 for that partition.
+
+### Managing Commits
+
+By default, CommitMessages will synchronously commit offsets to Kafka. For
+improved performance, you can instead periodically commit offsets to Kafka
+by setting CommitInterval on the ReaderConfig.
+
+```go
+// make a new reader that consumes from topic-A
+r := kafka.NewReader(kafka.ReaderConfig{
+	Brokers:        []string{"localhost:9092", "localhost:9093", "localhost:9094"},
+	GroupID:        "consumer-group-id",
+	Topic:          "topic-A",
+	MaxBytes:       10e6, // 10MB
+	CommitInterval: time.Second, // flushes commits to Kafka every second
+})
+```
+
+## Writer [![GoDoc](https://godoc.org/github.com/segmentio/kafka-go?status.svg)](https://godoc.org/github.com/segmentio/kafka-go#Writer)
+
+To produce messages to Kafka, a program may use the low-level `Conn` API, but
+the package also provides a higher-level `Writer` type which is more appropriate
+to use in most cases as it provides additional features:
+
+- Automatic retries and reconnections on errors.
+- Configurable distribution of messages across available partitions.
+- Synchronous or asynchronous writes of messages to Kafka.
+- Asynchronous cancellation using contexts.
+- Flushing of pending messages on close to support graceful shutdowns.
+- Creation of a missing topic before publishing a message. *Note!* This was the default behaviour up to version `v0.4.30`.
+
+```go
+// make a writer that produces to topic-A, using the least-bytes distribution
+w := &kafka.Writer{
+	Addr:     kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	Topic:    "topic-A",
+	Balancer: &kafka.LeastBytes{},
+}
+
+err := w.WriteMessages(context.Background(),
+	kafka.Message{
+		Key:   []byte("Key-A"),
+		Value: []byte("Hello World!"),
+	},
+	kafka.Message{
+		Key:   []byte("Key-B"),
+		Value: []byte("One!"),
+	},
+	kafka.Message{
+		Key:   []byte("Key-C"),
+		Value: []byte("Two!"),
+	},
+)
+if err != nil {
+	log.Fatal("failed to write messages:", err)
+}
+
+if err := w.Close(); err != nil {
+	log.Fatal("failed to close writer:", err)
+}
+```
+
+### Missing topic creation before publication
+
+```go
+// Make a writer that publishes messages to topic-A.
+// The topic will be created if it is missing.
+w := &kafka.Writer{
+	Addr:                   kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	Topic:                  "topic-A",
+	AllowAutoTopicCreation: true,
+}
+
+messages := []kafka.Message{
+	{
+		Key:   []byte("Key-A"),
+		Value: []byte("Hello World!"),
+	},
+	{
+		Key:   []byte("Key-B"),
+		Value: []byte("One!"),
+	},
+	{
+		Key:   []byte("Key-C"),
+		Value: []byte("Two!"),
+	},
+}
+
+var err error
+const retries = 3
+for i := 0; i < retries; i++ {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+
+	// attempt to create topic prior to publishing the message
+	err = w.WriteMessages(ctx, messages...)
+	cancel()
+	if errors.Is(err, kafka.LeaderNotAvailable) || errors.Is(err, context.DeadlineExceeded) {
+		time.Sleep(time.Millisecond * 250)
+		continue
+	}
+
+	if err != nil {
+		log.Fatalf("unexpected error %v", err)
+	}
+	break
+}
+
+if err := w.Close(); err != nil {
+	log.Fatal("failed to close writer:", err)
+}
+```
+
+### Writing to multiple topics
+
+Normally, the `WriterConfig.Topic` is used to initialize a single-topic writer.
+By excluding that particular configuration, you are given the ability to define
+the topic on a per-message basis by setting `Message.Topic`.
+
+```go
+w := &kafka.Writer{
+	Addr: kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	// NOTE: When Topic is not defined here, each Message must define it instead.
+	Balancer: &kafka.LeastBytes{},
+}
+
+err := w.WriteMessages(context.Background(),
+	// NOTE: Each Message has Topic defined, otherwise an error is returned.
+	kafka.Message{
+		Topic: "topic-A",
+		Key:   []byte("Key-A"),
+		Value: []byte("Hello World!"),
+	},
+	kafka.Message{
+		Topic: "topic-B",
+		Key:   []byte("Key-B"),
+		Value: []byte("One!"),
+	},
+	kafka.Message{
+		Topic: "topic-C",
+		Key:   []byte("Key-C"),
+		Value: []byte("Two!"),
+	},
+)
+if err != nil {
+	log.Fatal("failed to write messages:", err)
+}
+
+if err := w.Close(); err != nil {
+	log.Fatal("failed to close writer:", err)
+}
+```
+
+**NOTE:** These two patterns are mutually exclusive. If you set `Writer.Topic`,
+you must not also explicitly define `Message.Topic` on the messages you are
+writing. The opposite applies when you do not define a topic for the writer.
+The `Writer` will return an error if it detects this ambiguity.
+
+### Compatibility with other clients
+
+#### Sarama
+
+If you're switching from Sarama and need/want to use the same algorithm for message partitioning, you can either use
+the `kafka.Hash` balancer or the `kafka.ReferenceHash` balancer:
+* `kafka.Hash` = `sarama.NewHashPartitioner`
+* `kafka.ReferenceHash` = `sarama.NewReferenceHashPartitioner`
+
+The `kafka.Hash` and `kafka.ReferenceHash` balancers would route messages to the same partitions that the two
+aforementioned Sarama partitioners would route them to.
+
+```go
+w := &kafka.Writer{
+	Addr:     kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	Topic:    "topic-A",
+	Balancer: &kafka.Hash{},
+}
+```
+
+#### librdkafka and confluent-kafka-go
+
+Use the ```kafka.CRC32Balancer``` balancer to get the same behaviour as librdkafka's
+default ```consistent_random``` partition strategy.
+
+```go
+w := &kafka.Writer{
+	Addr:     kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	Topic:    "topic-A",
+	Balancer: kafka.CRC32Balancer{},
+}
+```
+
+#### Java
+
+Use the ```kafka.Murmur2Balancer``` balancer to get the same behaviour as the canonical
+Java client's default partitioner. Note: the Java class allows you to directly specify
+the partition, which is not permitted here.
+
+```go
+w := &kafka.Writer{
+	Addr:     kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	Topic:    "topic-A",
+	Balancer: kafka.Murmur2Balancer{},
+}
+```
+
+### Compression
+
+Compression can be enabled on the `Writer` by setting the `Compression` field:
+
+```go
+w := &kafka.Writer{
+	Addr:        kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+	Topic:       "topic-A",
+	Compression: kafka.Snappy,
+}
+```
+
+The `Reader` will determine whether the consumed messages are compressed by
+examining the message attributes. However, the package(s) for all expected
+codecs must be imported so that they get loaded correctly.
+
+_Note: in versions prior to 0.4 programs had to import compression packages to
+install codecs and support reading compressed messages from Kafka. This is no
+longer the case, and importing the compression packages is now a no-op._
+
+## TLS Support
+
+For a bare-bones `Conn` type, or in the `Reader`/`Writer` configs, you can specify a dialer option for TLS support. If the TLS field is nil, it will not connect with TLS.
+*Note:* Connecting to a Kafka cluster with TLS enabled without configuring TLS on the Conn/Reader/Writer can manifest as opaque io.ErrUnexpectedEOF errors.
+ + +### Connection + +```go +dialer := &kafka.Dialer{ + Timeout: 10 * time.Second, + DualStack: true, + TLS: &tls.Config{...tls config...}, +} + +conn, err := dialer.DialContext(ctx, "tcp", "localhost:9093") +``` + +### Reader + +```go +dialer := &kafka.Dialer{ + Timeout: 10 * time.Second, + DualStack: true, + TLS: &tls.Config{...tls config...}, +} + +r := kafka.NewReader(kafka.ReaderConfig{ + Brokers: []string{"localhost:9092", "localhost:9093", "localhost:9094"}, + GroupID: "consumer-group-id", + Topic: "topic-A", + Dialer: dialer, +}) +``` + +### Writer + + +Direct Writer creation + +```go +w := kafka.Writer{ + Addr: kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"), + Topic: "topic-A", + Balancer: &kafka.Hash{}, + Transport: &kafka.Transport{ + TLS: &tls.Config{}, + }, + } +``` + +Using `kafka.NewWriter` + +```go +dialer := &kafka.Dialer{ + Timeout: 10 * time.Second, + DualStack: true, + TLS: &tls.Config{...tls config...}, +} + +w := kafka.NewWriter(kafka.WriterConfig{ + Brokers: []string{"localhost:9092", "localhost:9093", "localhost:9094"}, + Topic: "topic-A", + Balancer: &kafka.Hash{}, + Dialer: dialer, +}) +``` +Note that `kafka.NewWriter` and `kafka.WriterConfig` are deprecated and will be removed in a future release. + +## SASL Support + +You can specify an option on the `Dialer` to use SASL authentication. The `Dialer` can be used directly to open a `Conn` or it can be passed to a `Reader` or `Writer` via their respective configs. If the `SASLMechanism` field is `nil`, it will not authenticate with SASL. + +### SASL Authentication Types + +#### [Plain](https://godoc.org/github.com/segmentio/kafka-go/sasl/plain#Mechanism) +```go +mechanism := plain.Mechanism{ + Username: "username", + Password: "password", +} +``` + +#### [SCRAM](https://godoc.org/github.com/segmentio/kafka-go/sasl/scram#Mechanism) +```go +mechanism, err := scram.Mechanism(scram.SHA512, "username", "password") +if err != nil { + panic(err) +} +``` + +### Connection + +```go +mechanism, err := scram.Mechanism(scram.SHA512, "username", "password") +if err != nil { + panic(err) +} + +dialer := &kafka.Dialer{ + Timeout: 10 * time.Second, + DualStack: true, + SASLMechanism: mechanism, +} + +conn, err := dialer.DialContext(ctx, "tcp", "localhost:9093") +``` + + +### Reader + +```go +mechanism, err := scram.Mechanism(scram.SHA512, "username", "password") +if err != nil { + panic(err) +} + +dialer := &kafka.Dialer{ + Timeout: 10 * time.Second, + DualStack: true, + SASLMechanism: mechanism, +} + +r := kafka.NewReader(kafka.ReaderConfig{ + Brokers: []string{"localhost:9092","localhost:9093", "localhost:9094"}, + GroupID: "consumer-group-id", + Topic: "topic-A", + Dialer: dialer, +}) +``` + +### Writer + +```go +mechanism, err := scram.Mechanism(scram.SHA512, "username", "password") +if err != nil { + panic(err) +} + +// Transports are responsible for managing connection pools and other resources, +// it's generally best to create a few of these and share them across your +// application. 
+sharedTransport := &kafka.Transport{
+    SASL: mechanism,
+}
+
+w := kafka.Writer{
+    Addr:      kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+    Topic:     "topic-A",
+    Balancer:  &kafka.Hash{},
+    Transport: sharedTransport,
+}
+```
+
+### Client
+
+```go
+mechanism, err := scram.Mechanism(scram.SHA512, "username", "password")
+if err != nil {
+    panic(err)
+}
+
+// Transports are responsible for managing connection pools and other resources;
+// it's generally best to create a few of these and share them across your
+// application.
+sharedTransport := &kafka.Transport{
+    SASL: mechanism,
+}
+
+client := &kafka.Client{
+    Addr:      kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+    Timeout:   10 * time.Second,
+    Transport: sharedTransport,
+}
+```
+
+#### Reading all messages within a time range
+
+```go
+startTime := time.Now().Add(-time.Hour)
+endTime := time.Now()
+batchSize := int(10e6) // 10MB
+
+r := kafka.NewReader(kafka.ReaderConfig{
+    Brokers:   []string{"localhost:9092", "localhost:9093", "localhost:9094"},
+    Topic:     "my-topic1",
+    Partition: 0,
+    MaxBytes:  batchSize,
+})
+
+if err := r.SetOffsetAt(context.Background(), startTime); err != nil {
+    log.Fatal("failed to set offset:", err)
+}
+
+for {
+    m, err := r.ReadMessage(context.Background())
+
+    if err != nil {
+        break
+    }
+    if m.Time.After(endTime) {
+        break
+    }
+    // TODO: process message
+    fmt.Printf("message at offset %d: %s = %s\n", m.Offset, string(m.Key), string(m.Value))
+}
+
+if err := r.Close(); err != nil {
+    log.Fatal("failed to close reader:", err)
+}
+```
+
+## Logging
+
+For visibility into the operations of the Reader/Writer types, configure a logger on creation.
+
+### Reader
+
+```go
+func logf(msg string, a ...interface{}) {
+    fmt.Printf(msg, a...)
+    fmt.Println()
+}
+
+r := kafka.NewReader(kafka.ReaderConfig{
+    Brokers:     []string{"localhost:9092", "localhost:9093", "localhost:9094"},
+    Topic:       "my-topic1",
+    Partition:   0,
+    Logger:      kafka.LoggerFunc(logf),
+    ErrorLogger: kafka.LoggerFunc(logf),
+})
+```
+
+### Writer
+
+```go
+func logf(msg string, a ...interface{}) {
+    fmt.Printf(msg, a...)
+    fmt.Println()
+}
+
+w := &kafka.Writer{
+    Addr:        kafka.TCP("localhost:9092"),
+    Topic:       "topic",
+    Logger:      kafka.LoggerFunc(logf),
+    ErrorLogger: kafka.LoggerFunc(logf),
+}
+```
+
+## Testing
+
+Subtle behavior changes in later Kafka versions have caused some historical tests to break. If you are running against Kafka 2.3.1 or later, exporting the `KAFKA_SKIP_NETTEST=1` environment variable will skip those tests.
+
+Run Kafka locally in Docker
+
+```bash
+docker-compose up -d
+```
+
+Run tests
+
+```bash
+KAFKA_VERSION=2.3.1 \
+  KAFKA_SKIP_NETTEST=1 \
+  go test -race ./...
+```
diff --git a/vendor/github.com/segmentio/kafka-go/addoffsetstotxn.go b/vendor/github.com/segmentio/kafka-go/addoffsetstotxn.go
new file mode 100644
index 00000000000..dd83edb3a1d
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/addoffsetstotxn.go
@@ -0,0 +1,67 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/addoffsetstotxn"
+)
+
+// AddOffsetsToTxnRequest is the request structure for the AddOffsetsToTxn function.
+type AddOffsetsToTxnRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// The transactional id key
+	TransactionalID string
+
+	// The Producer ID (PID) for the current producer session;
+	// received from an InitProducerID request.
+	ProducerID int
+
+	// The epoch associated with the current producer session for the given PID
+	ProducerEpoch int
+
+	// The unique group identifier.
+	GroupID string
+}
+
+// AddOffsetsToTxnResponse is the response structure for the AddOffsetsToTxn function.
+type AddOffsetsToTxnResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// An error that may have occurred when attempting to add the offsets
+	// to a transaction.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Error error
+}
+
+// AddOffsetsToTxn sends an add offsets to txn request to a kafka broker and returns the response.
+func (c *Client) AddOffsetsToTxn(
+	ctx context.Context,
+	req *AddOffsetsToTxnRequest,
+) (*AddOffsetsToTxnResponse, error) {
+	m, err := c.roundTrip(ctx, req.Addr, &addoffsetstotxn.Request{
+		TransactionalID: req.TransactionalID,
+		ProducerID:      int64(req.ProducerID),
+		ProducerEpoch:   int16(req.ProducerEpoch),
+		GroupID:         req.GroupID,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).AddOffsetsToTxn: %w", err)
+	}
+
+	r := m.(*addoffsetstotxn.Response)
+
+	res := &AddOffsetsToTxnResponse{
+		Throttle: makeDuration(r.ThrottleTimeMs),
+		Error:    makeError(r.ErrorCode, ""),
+	}
+
+	return res, nil
+}
diff --git a/vendor/github.com/segmentio/kafka-go/addpartitionstotxn.go b/vendor/github.com/segmentio/kafka-go/addpartitionstotxn.go
new file mode 100644
index 00000000000..74792135e6f
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/addpartitionstotxn.go
@@ -0,0 +1,108 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/addpartitionstotxn"
+)
+
+// AddPartitionToTxn represents a partition to be added
+// to a transaction.
+type AddPartitionToTxn struct {
+	// Partition is the ID of a partition to add to the transaction.
+	Partition int
+}
+
+// AddPartitionsToTxnRequest is the request structure for the AddPartitionsToTxn function.
+type AddPartitionsToTxnRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// The transactional id key
+	TransactionalID string
+
+	// The Producer ID (PID) for the current producer session;
+	// received from an InitProducerID request.
+	ProducerID int
+
+	// The epoch associated with the current producer session for the given PID
+	ProducerEpoch int
+
+	// Mappings of topic names to lists of partitions.
+	Topics map[string][]AddPartitionToTxn
+}
+
+// AddPartitionsToTxnResponse is the response structure for the AddPartitionsToTxn function.
+type AddPartitionsToTxnResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Mappings of topic names to partitions being added to a transaction.
+	Topics map[string][]AddPartitionToTxnPartition
+}
+
+// AddPartitionToTxnPartition represents the state of a single partition
+// in response to adding to a transaction.
+type AddPartitionToTxnPartition struct {
+	// The ID of the partition.
+	Partition int
+
+	// An error that may have occurred when attempting to add the partition
+	// to a transaction.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Error error
+}
+
+// AddPartitionsToTxn sends an add partitions to txn request to a kafka broker and returns the response.
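+//
+// A hypothetical usage sketch (the transactional id is illustrative only, and
+// the producer session values are assumed to come from a prior InitProducerID
+// request):
+//
+//	resp, err := client.AddPartitionsToTxn(ctx, &AddPartitionsToTxnRequest{
+//		TransactionalID: "my-transactional-id",
+//		ProducerID:      pid,   // from InitProducerID
+//		ProducerEpoch:   epoch, // from InitProducerID
+//		Topics: map[string][]AddPartitionToTxn{
+//			"topic-A": {{Partition: 0}, {Partition: 1}},
+//		},
+//	})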
+func (c *Client) AddPartitionsToTxn( + ctx context.Context, + req *AddPartitionsToTxnRequest, +) (*AddPartitionsToTxnResponse, error) { + protoReq := &addpartitionstotxn.Request{ + TransactionalID: req.TransactionalID, + ProducerID: int64(req.ProducerID), + ProducerEpoch: int16(req.ProducerEpoch), + } + protoReq.Topics = make([]addpartitionstotxn.RequestTopic, 0, len(req.Topics)) + + for topic, partitions := range req.Topics { + reqTopic := addpartitionstotxn.RequestTopic{ + Name: topic, + Partitions: make([]int32, len(partitions)), + } + for i, partition := range partitions { + reqTopic.Partitions[i] = int32(partition.Partition) + } + protoReq.Topics = append(protoReq.Topics, reqTopic) + } + + m, err := c.roundTrip(ctx, req.Addr, protoReq) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).AddPartitionsToTxn: %w", err) + } + + r := m.(*addpartitionstotxn.Response) + + res := &AddPartitionsToTxnResponse{ + Throttle: makeDuration(r.ThrottleTimeMs), + Topics: make(map[string][]AddPartitionToTxnPartition, len(r.Results)), + } + + for _, result := range r.Results { + partitions := make([]AddPartitionToTxnPartition, 0, len(result.Results)) + for _, rp := range result.Results { + partitions = append(partitions, AddPartitionToTxnPartition{ + Partition: int(rp.PartitionIndex), + Error: makeError(rp.ErrorCode, ""), + }) + } + res.Topics[result.Name] = partitions + } + + return res, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/address.go b/vendor/github.com/segmentio/kafka-go/address.go new file mode 100644 index 00000000000..f332b7b0b21 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/address.go @@ -0,0 +1,64 @@ +package kafka + +import ( + "net" + "strings" +) + +// TCP constructs an address with the network set to "tcp". +func TCP(address ...string) net.Addr { return makeNetAddr("tcp", address) } + +func makeNetAddr(network string, addresses []string) net.Addr { + switch len(addresses) { + case 0: + return nil // maybe panic instead? 
+ case 1: + return makeAddr(network, addresses[0]) + default: + return makeMultiAddr(network, addresses) + } +} + +func makeAddr(network, address string) net.Addr { + return &networkAddress{ + network: network, + address: canonicalAddress(address), + } +} + +func makeMultiAddr(network string, addresses []string) net.Addr { + multi := make(multiAddr, len(addresses)) + for i, address := range addresses { + multi[i] = makeAddr(network, address) + } + return multi +} + +type networkAddress struct { + network string + address string +} + +func (a *networkAddress) Network() string { return a.network } + +func (a *networkAddress) String() string { return a.address } + +type multiAddr []net.Addr + +func (m multiAddr) Network() string { return m.join(net.Addr.Network) } + +func (m multiAddr) String() string { return m.join(net.Addr.String) } + +func (m multiAddr) join(f func(net.Addr) string) string { + switch len(m) { + case 0: + return "" + case 1: + return f(m[0]) + } + s := make([]string, len(m)) + for i, a := range m { + s[i] = f(a) + } + return strings.Join(s, ",") +} diff --git a/vendor/github.com/segmentio/kafka-go/alterclientquotas.go b/vendor/github.com/segmentio/kafka-go/alterclientquotas.go new file mode 100644 index 00000000000..7a926e5c49f --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/alterclientquotas.go @@ -0,0 +1,131 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/alterclientquotas" +) + +// AlterClientQuotasRequest represents a request sent to a kafka broker to +// alter client quotas. +type AlterClientQuotasRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // List of client quotas entries to alter. + Entries []AlterClientQuotaEntry + + // Whether the alteration should be validated, but not performed. + ValidateOnly bool +} + +type AlterClientQuotaEntry struct { + // The quota entities to alter. + Entities []AlterClientQuotaEntity + + // An individual quota configuration entry to alter. + Ops []AlterClientQuotaOps +} + +type AlterClientQuotaEntity struct { + // The quota entity type. + EntityType string + + // The name of the quota entity, or null if the default. + EntityName string +} + +type AlterClientQuotaOps struct { + // The quota configuration key. + Key string + + // The quota configuration value to set, otherwise ignored if the value is to be removed. + Value float64 + + // Whether the quota configuration value should be removed, otherwise set. + Remove bool +} + +type AlterClientQuotaResponseQuotas struct { + // Error is set to a non-nil value including the code and message if a top-level + // error was encountered when doing the update. + Error error + + // The altered quota entities. + Entities []AlterClientQuotaEntity +} + +// AlterClientQuotasResponse represents a response from a kafka broker to an alter client +// quotas request. +type AlterClientQuotasResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // List of altered client quotas responses. + Entries []AlterClientQuotaResponseQuotas +} + +// AlterClientQuotas sends client quotas alteration request to a kafka broker and returns +// the response. 
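+//
+// A hypothetical request sketch (the entity name and quota key shown are
+// illustrative only):
+//
+//	resp, err := client.AlterClientQuotas(ctx, &AlterClientQuotasRequest{
+//		Entries: []AlterClientQuotaEntry{{
+//			Entities: []AlterClientQuotaEntity{{EntityType: "client-id", EntityName: "my-client"}},
+//			Ops:      []AlterClientQuotaOps{{Key: "producer_byte_rate", Value: 1048576}},
+//		}},
+//		ValidateOnly: true,
+//	})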
+func (c *Client) AlterClientQuotas(ctx context.Context, req *AlterClientQuotasRequest) (*AlterClientQuotasResponse, error) {
+	entries := make([]alterclientquotas.Entry, len(req.Entries))
+
+	for entryIdx, entry := range req.Entries {
+		entities := make([]alterclientquotas.Entity, len(entry.Entities))
+		for entityIdx, entity := range entry.Entities {
+			entities[entityIdx] = alterclientquotas.Entity{
+				EntityType: entity.EntityType,
+				EntityName: entity.EntityName,
+			}
+		}
+
+		ops := make([]alterclientquotas.Ops, len(entry.Ops))
+		for opsIdx, op := range entry.Ops {
+			ops[opsIdx] = alterclientquotas.Ops{
+				Key:    op.Key,
+				Value:  op.Value,
+				Remove: op.Remove,
+			}
+		}
+
+		entries[entryIdx] = alterclientquotas.Entry{
+			Entities: entities,
+			Ops:      ops,
+		}
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &alterclientquotas.Request{
+		Entries:      entries,
+		ValidateOnly: req.ValidateOnly,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).AlterClientQuotas: %w", err)
+	}
+
+	res := m.(*alterclientquotas.Response)
+	responseEntries := make([]AlterClientQuotaResponseQuotas, len(res.Results))
+
+	for responseEntryIdx, responseEntry := range res.Results {
+		responseEntities := make([]AlterClientQuotaEntity, len(responseEntry.Entities))
+		for responseEntityIdx, responseEntity := range responseEntry.Entities {
+			responseEntities[responseEntityIdx] = AlterClientQuotaEntity{
+				EntityType: responseEntity.EntityType,
+				EntityName: responseEntity.EntityName,
+			}
+		}
+
+		responseEntries[responseEntryIdx] = AlterClientQuotaResponseQuotas{
+			Error:    makeError(responseEntry.ErrorCode, responseEntry.ErrorMessage),
+			Entities: responseEntities,
+		}
+	}
+	ret := &AlterClientQuotasResponse{
+		Throttle: makeDuration(res.ThrottleTimeMs),
+		Entries:  responseEntries,
+	}
+
+	return ret, nil
+}
diff --git a/vendor/github.com/segmentio/kafka-go/alterconfigs.go b/vendor/github.com/segmentio/kafka-go/alterconfigs.go
new file mode 100644
index 00000000000..c994506a146
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/alterconfigs.go
@@ -0,0 +1,107 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/alterconfigs"
+)
+
+// AlterConfigsRequest represents a request sent to a kafka broker to alter configs.
+type AlterConfigsRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// List of resources to update.
+	Resources []AlterConfigRequestResource
+
+	// When set to true, the configuration is not applied but is validated as
+	// if it were.
+	ValidateOnly bool
+}
+
+type AlterConfigRequestResource struct {
+	// Resource Type
+	ResourceType ResourceType
+
+	// Resource Name
+	ResourceName string
+
+	// Configs is a list of configuration updates.
+	Configs []AlterConfigRequestConfig
+}
+
+type AlterConfigRequestConfig struct {
+	// Configuration key name
+	Name string
+
+	// The value to set for the configuration key.
+	Value string
+}
+
+// AlterConfigsResponse represents a response from a kafka broker to an alter config request.
+type AlterConfigsResponse struct {
+	// Duration for which the request was throttled due to a quota violation.
+	Throttle time.Duration
+
+	// Mapping of resources to errors that occurred while attempting to alter
+	// their configuration.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+ Errors map[AlterConfigsResponseResource]error +} + +// AlterConfigsResponseResource helps map errors to specific resources in an +// alter config response. +type AlterConfigsResponseResource struct { + Type int8 + Name string +} + +// AlterConfigs sends a config altering request to a kafka broker and returns the +// response. +func (c *Client) AlterConfigs(ctx context.Context, req *AlterConfigsRequest) (*AlterConfigsResponse, error) { + resources := make([]alterconfigs.RequestResources, len(req.Resources)) + + for i, t := range req.Resources { + configs := make([]alterconfigs.RequestConfig, len(t.Configs)) + for j, v := range t.Configs { + configs[j] = alterconfigs.RequestConfig{ + Name: v.Name, + Value: v.Value, + } + } + resources[i] = alterconfigs.RequestResources{ + ResourceType: int8(t.ResourceType), + ResourceName: t.ResourceName, + Configs: configs, + } + } + + m, err := c.roundTrip(ctx, req.Addr, &alterconfigs.Request{ + Resources: resources, + ValidateOnly: req.ValidateOnly, + }) + + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).AlterConfigs: %w", err) + } + + res := m.(*alterconfigs.Response) + ret := &AlterConfigsResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Errors: make(map[AlterConfigsResponseResource]error, len(res.Responses)), + } + + for _, t := range res.Responses { + ret.Errors[AlterConfigsResponseResource{ + Type: t.ResourceType, + Name: t.ResourceName, + }] = makeError(t.ErrorCode, t.ErrorMessage) + } + + return ret, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/alterpartitionreassignments.go b/vendor/github.com/segmentio/kafka-go/alterpartitionreassignments.go new file mode 100644 index 00000000000..ec76dbd8baf --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/alterpartitionreassignments.go @@ -0,0 +1,115 @@ +package kafka + +import ( + "context" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/alterpartitionreassignments" +) + +// AlterPartitionReassignmentsRequest is a request to the AlterPartitionReassignments API. +type AlterPartitionReassignmentsRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // Topic is the name of the topic to alter partitions in. + Topic string + + // Assignments is the list of partition reassignments to submit to the API. + Assignments []AlterPartitionReassignmentsRequestAssignment + + // Timeout is the amount of time to wait for the request to complete. + Timeout time.Duration +} + +// AlterPartitionReassignmentsRequestAssignment contains the requested reassignments for a single +// partition. +type AlterPartitionReassignmentsRequestAssignment struct { + // PartitionID is the ID of the partition to make the reassignments in. + PartitionID int + + // BrokerIDs is a slice of brokers to set the partition replicas to. + BrokerIDs []int +} + +// AlterPartitionReassignmentsResponse is a response from the AlterPartitionReassignments API. +type AlterPartitionReassignmentsResponse struct { + // Error is set to a non-nil value including the code and message if a top-level + // error was encountered when doing the update. + Error error + + // PartitionResults contains the specific results for each partition. + PartitionResults []AlterPartitionReassignmentsResponsePartitionResult +} + +// AlterPartitionReassignmentsResponsePartitionResult contains the detailed result of +// doing reassignments for a single partition. 
+type AlterPartitionReassignmentsResponsePartitionResult struct { + // PartitionID is the ID of the partition that was altered. + PartitionID int + + // Error is set to a non-nil value including the code and message if an error was encountered + // during the update for this partition. + Error error +} + +func (c *Client) AlterPartitionReassignments( + ctx context.Context, + req *AlterPartitionReassignmentsRequest, +) (*AlterPartitionReassignmentsResponse, error) { + apiPartitions := []alterpartitionreassignments.RequestPartition{} + + for _, assignment := range req.Assignments { + replicas := []int32{} + for _, brokerID := range assignment.BrokerIDs { + replicas = append(replicas, int32(brokerID)) + } + + apiPartitions = append( + apiPartitions, + alterpartitionreassignments.RequestPartition{ + PartitionIndex: int32(assignment.PartitionID), + Replicas: replicas, + }, + ) + } + + apiReq := &alterpartitionreassignments.Request{ + TimeoutMs: int32(req.Timeout.Milliseconds()), + Topics: []alterpartitionreassignments.RequestTopic{ + { + Name: req.Topic, + Partitions: apiPartitions, + }, + }, + } + + protoResp, err := c.roundTrip( + ctx, + req.Addr, + apiReq, + ) + if err != nil { + return nil, err + } + apiResp := protoResp.(*alterpartitionreassignments.Response) + + resp := &AlterPartitionReassignmentsResponse{ + Error: makeError(apiResp.ErrorCode, apiResp.ErrorMessage), + } + + for _, topicResult := range apiResp.Results { + for _, partitionResult := range topicResult.Partitions { + resp.PartitionResults = append( + resp.PartitionResults, + AlterPartitionReassignmentsResponsePartitionResult{ + PartitionID: int(partitionResult.PartitionIndex), + Error: makeError(partitionResult.ErrorCode, partitionResult.ErrorMessage), + }, + ) + } + } + + return resp, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/apiversions.go b/vendor/github.com/segmentio/kafka-go/apiversions.go new file mode 100644 index 00000000000..52412b81146 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/apiversions.go @@ -0,0 +1,72 @@ +package kafka + +import ( + "context" + "net" + + "github.com/segmentio/kafka-go/protocol" + "github.com/segmentio/kafka-go/protocol/apiversions" +) + +// ApiVersionsRequest is a request to the ApiVersions API. +type ApiVersionsRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr +} + +// ApiVersionsResponse is a response from the ApiVersions API. +type ApiVersionsResponse struct { + // Error is set to a non-nil value if an error was encountered. + Error error + + // ApiKeys contains the specific details of each supported API. + ApiKeys []ApiVersionsResponseApiKey +} + +// ApiVersionsResponseApiKey includes the details of which versions are supported for a single API. +type ApiVersionsResponseApiKey struct { + // ApiKey is the ID of the API. + ApiKey int + + // ApiName is a human-friendly description of the API. + ApiName string + + // MinVersion is the minimum API version supported by the broker. + MinVersion int + + // MaxVersion is the maximum API version supported by the broker. 
+	MaxVersion int
+}
+
+func (c *Client) ApiVersions(
+	ctx context.Context,
+	req *ApiVersionsRequest,
+) (*ApiVersionsResponse, error) {
+	apiReq := &apiversions.Request{}
+	protoResp, err := c.roundTrip(
+		ctx,
+		req.Addr,
+		apiReq,
+	)
+	if err != nil {
+		return nil, err
+	}
+	apiResp := protoResp.(*apiversions.Response)
+
+	resp := &ApiVersionsResponse{
+		Error: makeError(apiResp.ErrorCode, ""),
+	}
+	for _, apiKey := range apiResp.ApiKeys {
+		resp.ApiKeys = append(
+			resp.ApiKeys,
+			ApiVersionsResponseApiKey{
+				ApiKey:     int(apiKey.ApiKey),
+				ApiName:    protocol.ApiKey(apiKey.ApiKey).String(),
+				MinVersion: int(apiKey.MinVersion),
+				MaxVersion: int(apiKey.MaxVersion),
+			},
+		)
+	}
+
+	return resp, err
+}
diff --git a/vendor/github.com/segmentio/kafka-go/balancer.go b/vendor/github.com/segmentio/kafka-go/balancer.go
new file mode 100644
index 00000000000..f4768cf8835
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/balancer.go
@@ -0,0 +1,340 @@
+package kafka
+
+import (
+	"hash"
+	"hash/crc32"
+	"hash/fnv"
+	"math/rand"
+	"sort"
+	"sync"
+	"sync/atomic"
+)
+
+// The Balancer interface provides an abstraction of the message distribution
+// logic used by Writer instances to route messages to the partitions available
+// on a kafka cluster.
+//
+// Balancers must be safe to use concurrently from multiple goroutines.
+type Balancer interface {
+	// Balance receives a message and a set of available partitions and
+	// returns the partition number that the message should be routed to.
+	//
+	// An application should refrain from using a balancer to manage multiple
+	// sets of partitions (from different topics for example), use one balancer
+	// instance for each partition set, so the balancer can detect when the
+	// partitions change and assume that the kafka topic has been rebalanced.
+	Balance(msg Message, partitions ...int) (partition int)
+}
+
+// BalancerFunc is an implementation of the Balancer interface that makes it
+// possible to use regular functions to distribute messages across partitions.
+type BalancerFunc func(Message, ...int) int
+
+// Balance calls f, satisfies the Balancer interface.
+func (f BalancerFunc) Balance(msg Message, partitions ...int) int {
+	return f(msg, partitions...)
+}
+
+// RoundRobin is a Balancer implementation that equally distributes messages
+// across all available partitions.
+type RoundRobin struct {
+	// Use a 32-bit integer so RoundRobin values don't need to be aligned to
+	// apply atomic increments.
+	offset uint32
+}
+
+// Balance satisfies the Balancer interface.
+func (rr *RoundRobin) Balance(msg Message, partitions ...int) int {
+	return rr.balance(partitions)
+}
+
+func (rr *RoundRobin) balance(partitions []int) int {
+	length := uint32(len(partitions))
+	offset := atomic.AddUint32(&rr.offset, 1) - 1
+	return partitions[offset%length]
+}
+
+// LeastBytes is a Balancer implementation that routes messages to the partition
+// that has received the least amount of data.
+//
+// Note that no coordination is done between multiple producers, having good
+// balancing relies on the fact that each producer using a LeastBytes balancer
+// should produce well balanced messages.
+type LeastBytes struct {
+	mutex    sync.Mutex
+	counters []leastBytesCounter
+}
+
+type leastBytesCounter struct {
+	partition int
+	bytes     uint64
+}
+
+// Balance satisfies the Balancer interface.
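+//
+// A hypothetical writer configuration using this balancer (the broker address
+// is illustrative only):
+//
+//	w := &kafka.Writer{
+//		Addr:     kafka.TCP("localhost:9092"),
+//		Topic:    "topic-A",
+//		Balancer: &kafka.LeastBytes{},
+//	}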
+func (lb *LeastBytes) Balance(msg Message, partitions ...int) int {
+	lb.mutex.Lock()
+	defer lb.mutex.Unlock()
+
+	// partitions change
+	if len(partitions) != len(lb.counters) {
+		lb.counters = lb.makeCounters(partitions...)
+	}
+
+	minBytes := lb.counters[0].bytes
+	minIndex := 0
+
+	for i, c := range lb.counters[1:] {
+		if c.bytes < minBytes {
+			minIndex = i + 1
+			minBytes = c.bytes
+		}
+	}
+
+	c := &lb.counters[minIndex]
+	c.bytes += uint64(len(msg.Key)) + uint64(len(msg.Value))
+	return c.partition
+}
+
+func (lb *LeastBytes) makeCounters(partitions ...int) (counters []leastBytesCounter) {
+	counters = make([]leastBytesCounter, len(partitions))
+
+	for i, p := range partitions {
+		counters[i].partition = p
+	}
+
+	sort.Slice(counters, func(i int, j int) bool {
+		return counters[i].partition < counters[j].partition
+	})
+	return
+}
+
+var (
+	fnv1aPool = &sync.Pool{
+		New: func() interface{} {
+			return fnv.New32a()
+		},
+	}
+)
+
+// Hash is a Balancer that uses the provided hash function to determine which
+// partition to route messages to. This ensures that messages with the same key
+// are routed to the same partition.
+//
+// The logic to calculate the partition is:
+//
+//	hasher.Sum32() % len(partitions) => partition
+//
+// By default, Hash uses the FNV-1a algorithm. This is the same algorithm used
+// by the Sarama Producer and ensures that messages produced by kafka-go will
+// be delivered to the same partitions that the Sarama producer would deliver
+// them to.
+type Hash struct {
+	rr     RoundRobin
+	Hasher hash.Hash32
+
+	// lock protects Hasher while calculating the hash code. It is assumed that
+	// the Hasher field is read-only once the Balancer is created, so as a
+	// performance optimization, reads of the field are not protected.
+	lock sync.Mutex
+}
+
+func (h *Hash) Balance(msg Message, partitions ...int) int {
+	if msg.Key == nil {
+		return h.rr.Balance(msg, partitions...)
+	}
+
+	hasher := h.Hasher
+	if hasher != nil {
+		h.lock.Lock()
+		defer h.lock.Unlock()
+	} else {
+		hasher = fnv1aPool.Get().(hash.Hash32)
+		defer fnv1aPool.Put(hasher)
+	}
+
+	hasher.Reset()
+	if _, err := hasher.Write(msg.Key); err != nil {
+		panic(err)
+	}
+
+	// uses the same algorithm that Sarama's hashPartitioner uses
+	// note the type conversions here. if the uint32 hash code is not cast to
+	// an int32, we do not get the same result as sarama.
+	partition := int32(hasher.Sum32()) % int32(len(partitions))
+	if partition < 0 {
+		partition = -partition
+	}
+
+	return int(partition)
+}
+
+// ReferenceHash is a Balancer that uses the provided hash function to determine which
+// partition to route messages to. This ensures that messages with the same key
+// are routed to the same partition.
+//
+// The logic to calculate the partition is:
+//
+//	(int32(hasher.Sum32()) & 0x7fffffff) % len(partitions) => partition
+//
+// By default, ReferenceHash uses the FNV-1a algorithm. This is the same algorithm as
+// the Sarama NewReferenceHashPartitioner and ensures that messages produced by kafka-go will
+// be delivered to the same partitions that the Sarama producer would deliver
+// them to.
+type ReferenceHash struct {
+	rr     randomBalancer
+	Hasher hash.Hash32
+
+	// lock protects Hasher while calculating the hash code. It is assumed that
+	// the Hasher field is read-only once the Balancer is created, so as a
+	// performance optimization, reads of the field are not protected.
+ lock sync.Mutex +} + +func (h *ReferenceHash) Balance(msg Message, partitions ...int) int { + if msg.Key == nil { + return h.rr.Balance(msg, partitions...) + } + + hasher := h.Hasher + if hasher != nil { + h.lock.Lock() + defer h.lock.Unlock() + } else { + hasher = fnv1aPool.Get().(hash.Hash32) + defer fnv1aPool.Put(hasher) + } + + hasher.Reset() + if _, err := hasher.Write(msg.Key); err != nil { + panic(err) + } + + // uses the same algorithm as the Sarama's referenceHashPartitioner. + // note the type conversions here. if the uint32 hash code is not cast to + // an int32, we do not get the same result as sarama. + partition := (int32(hasher.Sum32()) & 0x7fffffff) % int32(len(partitions)) + return int(partition) +} + +type randomBalancer struct { + mock int // mocked return value, used for testing +} + +func (b randomBalancer) Balance(msg Message, partitions ...int) (partition int) { + if b.mock != 0 { + return b.mock + } + return partitions[rand.Int()%len(partitions)] +} + +// CRC32Balancer is a Balancer that uses the CRC32 hash function to determine +// which partition to route messages to. This ensures that messages with the +// same key are routed to the same partition. This balancer is compatible with +// the built-in hash partitioners in librdkafka and the language bindings that +// are built on top of it, including the +// github.com/confluentinc/confluent-kafka-go Go package. +// +// With the Consistent field false (default), this partitioner is equivalent to +// the "consistent_random" setting in librdkafka. When Consistent is true, this +// partitioner is equivalent to the "consistent" setting. The latter will hash +// empty or nil keys into the same partition. +// +// Unless you are absolutely certain that all your messages will have keys, it's +// best to leave the Consistent flag off. Otherwise, you run the risk of +// creating a very hot partition. +type CRC32Balancer struct { + Consistent bool + random randomBalancer +} + +func (b CRC32Balancer) Balance(msg Message, partitions ...int) (partition int) { + // NOTE: the crc32 balancers in librdkafka don't differentiate between nil + // and empty keys. both cases are treated as unset. + if len(msg.Key) == 0 && !b.Consistent { + return b.random.Balance(msg, partitions...) + } + + idx := crc32.ChecksumIEEE(msg.Key) % uint32(len(partitions)) + return partitions[idx] +} + +// Murmur2Balancer is a Balancer that uses the Murmur2 hash function to +// determine which partition to route messages to. This ensures that messages +// with the same key are routed to the same partition. This balancer is +// compatible with the partitioner used by the Java library and by librdkafka's +// "murmur2" and "murmur2_random" partitioners. +// +// With the Consistent field false (default), this partitioner is equivalent to +// the "murmur2_random" setting in librdkafka. When Consistent is true, this +// partitioner is equivalent to the "murmur2" setting. The latter will hash +// nil keys into the same partition. Empty, non-nil keys are always hashed to +// the same partition regardless of configuration. +// +// Unless you are absolutely certain that all your messages will have keys, it's +// best to leave the Consistent flag off. Otherwise, you run the risk of +// creating a very hot partition. +// +// Note that the librdkafka documentation states that the "murmur2_random" is +// functionally equivalent to the default Java partitioner. That's because the +// Java partitioner will use a round robin balancer instead of random on nil +// keys. 
We choose librdkafka's implementation because it arguably has a larger
+// install base.
+type Murmur2Balancer struct {
+	Consistent bool
+	random     randomBalancer
+}
+
+func (b Murmur2Balancer) Balance(msg Message, partitions ...int) (partition int) {
+	// NOTE: the murmur2 balancers in java and librdkafka treat a nil key as
+	// non-existent while treating an empty slice as a defined value.
+	if msg.Key == nil && !b.Consistent {
+		return b.random.Balance(msg, partitions...)
+	}
+
+	idx := (murmur2(msg.Key) & 0x7fffffff) % uint32(len(partitions))
+	return partitions[idx]
+}
+
+// Go port of the Java library's murmur2 function.
+// https://github.com/apache/kafka/blob/1.0/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L353
+func murmur2(data []byte) uint32 {
+	length := len(data)
+	const (
+		seed uint32 = 0x9747b28c
+		// 'm' and 'r' are mixing constants generated offline.
+		// They're not really 'magic', they just happen to work well.
+		m = 0x5bd1e995
+		r = 24
+	)
+
+	// Initialize the hash to a random value
+	h := seed ^ uint32(length)
+	length4 := length / 4
+
+	for i := 0; i < length4; i++ {
+		i4 := i * 4
+		k := (uint32(data[i4+0]) & 0xff) + ((uint32(data[i4+1]) & 0xff) << 8) + ((uint32(data[i4+2]) & 0xff) << 16) + ((uint32(data[i4+3]) & 0xff) << 24)
+		k *= m
+		k ^= k >> r
+		k *= m
+		h *= m
+		h ^= k
+	}
+
+	// Handle the last few bytes of the input array
+	extra := length % 4
+	if extra >= 3 {
+		h ^= (uint32(data[(length & ^3)+2]) & 0xff) << 16
+	}
+	if extra >= 2 {
+		h ^= (uint32(data[(length & ^3)+1]) & 0xff) << 8
+	}
+	if extra >= 1 {
+		h ^= uint32(data[length & ^3]) & 0xff
+		h *= m
+	}
+
+	h ^= h >> 13
+	h *= m
+	h ^= h >> 15
+
+	return h
+}
diff --git a/vendor/github.com/segmentio/kafka-go/batch.go b/vendor/github.com/segmentio/kafka-go/batch.go
new file mode 100644
index 00000000000..19dcef8cdc4
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/batch.go
@@ -0,0 +1,313 @@
+package kafka
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"sync"
+	"time"
+)
+
+// A Batch is an iterator over a sequence of messages fetched from a kafka
+// server.
+//
+// Batches are created by calling (*Conn).ReadBatch. They hold an internal lock
+// on the connection, which is released when the batch is closed. Failing to
+// call a batch's Close method will likely result in a dead-lock when trying to
+// use the connection.
+//
+// Batches are safe to use concurrently from multiple goroutines.
+type Batch struct {
+	mutex         sync.Mutex
+	conn          *Conn
+	lock          *sync.Mutex
+	msgs          *messageSetReader
+	deadline      time.Time
+	throttle      time.Duration
+	topic         string
+	partition     int
+	offset        int64
+	highWaterMark int64
+	err           error
+	// The last offset in the batch.
+	//
+	// We use lastOffset to skip offsets that have been compacted away.
+	//
+	// We store lastOffset because we get lastOffset when we read a new message
+	// but only try to handle compaction when we receive an EOF. However, when
+	// we get an EOF we do not get the lastOffset. So there is a mismatch
+	// between when we receive it and need to use it.
+	lastOffset int64
+}
+
+// Throttle gives the throttling duration applied by the kafka server on the
+// connection.
+func (batch *Batch) Throttle() time.Duration {
+	return batch.throttle
+}
+
+// HighWaterMark returns the current highest watermark in a partition.
+func (batch *Batch) HighWaterMark() int64 {
+	return batch.highWaterMark
+}
+
+// Partition returns the batch partition.
+func (batch *Batch) Partition() int {
+	return batch.partition
+}
+
+// Offset returns the offset of the next message in the batch.
+func (batch *Batch) Offset() int64 {
+	batch.mutex.Lock()
+	offset := batch.offset
+	batch.mutex.Unlock()
+	return offset
+}
+
+// Close closes the batch, releasing the connection lock and returning an error
+// if reading the batch failed for any reason.
+func (batch *Batch) Close() error {
+	batch.mutex.Lock()
+	err := batch.close()
+	batch.mutex.Unlock()
+	return err
+}
+
+func (batch *Batch) close() (err error) {
+	conn := batch.conn
+	lock := batch.lock
+
+	batch.conn = nil
+	batch.lock = nil
+
+	if batch.msgs != nil {
+		batch.msgs.discard()
+	}
+
+	if batch.msgs != nil && batch.msgs.decompressed != nil {
+		releaseBuffer(batch.msgs.decompressed)
+		batch.msgs.decompressed = nil
+	}
+
+	if err = batch.err; errors.Is(batch.err, io.EOF) {
+		err = nil
+	}
+
+	if conn != nil {
+		conn.rdeadline.unsetConnReadDeadline()
+		conn.mutex.Lock()
+		conn.offset = batch.offset
+		conn.mutex.Unlock()
+
+		if err != nil {
+			var kafkaError Error
+			if !errors.As(err, &kafkaError) && !errors.Is(err, io.ErrShortBuffer) {
+				conn.Close()
+			}
+		}
+	}
+
+	if lock != nil {
+		lock.Unlock()
+	}
+
+	return
+}
+
+// Err returns a non-nil error if the batch is broken. This is the same error
+// that would be returned by Read, ReadMessage or Close (except in the case of
+// io.EOF which is never returned by Close).
+//
+// This method is useful when building retry mechanisms for (*Conn).ReadBatch;
+// the program can check whether the batch carried an error before attempting to
+// read the first message.
+//
+// Note that checking errors on a batch is optional, calling Read or ReadMessage
+// is always valid and can be used to either read a message or an error in cases
+// where that's convenient.
+func (batch *Batch) Err() error { return batch.err }
+
+// Read reads the value of the next message from the batch into b, returning the
+// number of bytes read, or an error if the next message couldn't be read.
+//
+// If an error is returned the batch cannot be used anymore and calling Read
+// again will keep returning that error. All errors except io.EOF (indicating
+// that the program consumed all messages from the batch) are also returned by
+// Close.
+//
+// The method fails with io.ErrShortBuffer if the buffer passed as argument is
+// too small to hold the message value.
+func (batch *Batch) Read(b []byte) (int, error) {
+	n := 0
+
+	batch.mutex.Lock()
+	offset := batch.offset
+
+	_, _, _, err := batch.readMessage(
+		func(r *bufio.Reader, size int, nbytes int) (int, error) {
+			if nbytes < 0 {
+				return size, nil
+			}
+			return discardN(r, size, nbytes)
+		},
+		func(r *bufio.Reader, size int, nbytes int) (int, error) {
+			if nbytes < 0 {
+				return size, nil
+			}
+			// make sure there are enough bytes for the message value. return
+			// errShortRead if the message is truncated.
+			if nbytes > size {
+				return size, errShortRead
+			}
+			n = nbytes // return value
+			if nbytes > cap(b) {
+				nbytes = cap(b)
+			}
+			if nbytes > len(b) {
+				b = b[:nbytes]
+			}
+			nbytes, err := io.ReadFull(r, b[:nbytes])
+			if err != nil {
+				return size - nbytes, err
+			}
+			return discardN(r, size-nbytes, n-nbytes)
+		},
+	)
+
+	if err == nil && n > len(b) {
+		n, err = len(b), io.ErrShortBuffer
+		batch.err = io.ErrShortBuffer
+		batch.offset = offset // rollback
+	}
+
+	batch.mutex.Unlock()
+	return n, err
+}
+
+// ReadMessage reads and returns the next message from the batch.
+//
+// Because this method allocates memory buffers for the message key and value
+// it is less memory-efficient than Read, but has the advantage of never
+// failing with io.ErrShortBuffer.
+func (batch *Batch) ReadMessage() (Message, error) {
+	msg := Message{}
+	batch.mutex.Lock()
+
+	var offset, timestamp int64
+	var headers []Header
+	var err error
+
+	offset, timestamp, headers, err = batch.readMessage(
+		func(r *bufio.Reader, size int, nbytes int) (remain int, err error) {
+			msg.Key, remain, err = readNewBytes(r, size, nbytes)
+			return
+		},
+		func(r *bufio.Reader, size int, nbytes int) (remain int, err error) {
+			msg.Value, remain, err = readNewBytes(r, size, nbytes)
+			return
+		},
+	)
+	// A batch may start before the requested offset so skip messages
+	// until the requested offset is reached.
+	for batch.conn != nil && offset < batch.conn.offset {
+		if err != nil {
+			break
+		}
+		offset, timestamp, headers, err = batch.readMessage(
+			func(r *bufio.Reader, size int, nbytes int) (remain int, err error) {
+				msg.Key, remain, err = readNewBytes(r, size, nbytes)
+				return
+			},
+			func(r *bufio.Reader, size int, nbytes int) (remain int, err error) {
+				msg.Value, remain, err = readNewBytes(r, size, nbytes)
+				return
+			},
+		)
+	}
+
+	batch.mutex.Unlock()
+	msg.Topic = batch.topic
+	msg.Partition = batch.partition
+	msg.Offset = offset
+	msg.HighWaterMark = batch.highWaterMark
+	msg.Time = makeTime(timestamp)
+	msg.Headers = headers
+
+	return msg, err
+}
+
+func (batch *Batch) readMessage(
+	key func(*bufio.Reader, int, int) (int, error),
+	val func(*bufio.Reader, int, int) (int, error),
+) (offset int64, timestamp int64, headers []Header, err error) {
+	if err = batch.err; err != nil {
+		return
+	}
+
+	var lastOffset int64
+	offset, lastOffset, timestamp, headers, err = batch.msgs.readMessage(batch.offset, key, val)
+	switch {
+	case err == nil:
+		batch.offset = offset + 1
+		batch.lastOffset = lastOffset
+	case errors.Is(err, errShortRead):
+		// As an "optimization" kafka truncates the returned response after
+		// producing MaxBytes, which could then cause the code to return
+		// errShortRead.
+		err = batch.msgs.discard()
+		switch {
+		case err != nil:
+			// Since io.EOF is used by the batch to indicate that there are
+			// no more messages to consume, it is crucial that any io.EOF errors
+			// on the underlying connection are repackaged. Otherwise, the
+			// caller can't tell the difference between a batch that was fully
+			// consumed or a batch whose connection is in an error state.
+			batch.err = dontExpectEOF(err)
+		case batch.msgs.remaining() == 0:
+			// Because we use the adjusted deadline we could end up returning
+			// before the actual deadline occurred. This is necessary otherwise
+			// timing out the connection for real could end up leaving it in an
+			// unpredictable state, which would require closing it.
+			// This design decision was made to maximize the chances of keeping
+			// the connection open, the trade off being to lose precision on the
+			// read deadline management.
+			err = checkTimeoutErr(batch.deadline)
+			batch.err = err
+
+			// Checks the following:
+			// - `batch.err` for a "success" from the previous timeout check
+			// - `batch.msgs.lengthRemain` to ensure that this EOF is not due
+			//   to MaxBytes truncation
+			// - `batch.lastOffset` to ensure that the message format contains
+			//   `lastOffset`
+			if errors.Is(batch.err, io.EOF) && batch.msgs.lengthRemain == 0 && batch.lastOffset != -1 {
+				// Log compaction can create batches that end with compacted
+				// records so the normal strategy that increments the "next"
+				// offset as records are read doesn't work as the compacted
+				// records are "missing" and never get "read".
+				//
+				// In order to reliably reach the next non-compacted offset we
+				// jump past the saved lastOffset.
+				batch.offset = batch.lastOffset + 1
+			}
+		}
+	default:
+		// Since io.EOF is used by the batch to indicate that there are
+		// no more messages to consume, it is crucial that any io.EOF errors
+		// on the underlying connection are repackaged. Otherwise, the
+		// caller can't tell the difference between a batch that was fully
+		// consumed or a batch whose connection is in an error state.
+		batch.err = dontExpectEOF(err)
+	}
+
+	return
+}
+
+func checkTimeoutErr(deadline time.Time) (err error) {
+	if !deadline.IsZero() && time.Now().After(deadline) {
+		err = RequestTimedOut
+	} else {
+		err = io.EOF
+	}
+	return
+}
diff --git a/vendor/github.com/segmentio/kafka-go/buffer.go b/vendor/github.com/segmentio/kafka-go/buffer.go
new file mode 100644
index 00000000000..5bf50c05fa7
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/buffer.go
@@ -0,0 +1,27 @@
+package kafka
+
+import (
+	"bytes"
+	"sync"
+)
+
+var bufferPool = sync.Pool{
+	New: func() interface{} { return newBuffer() },
+}
+
+func newBuffer() *bytes.Buffer {
+	b := new(bytes.Buffer)
+	b.Grow(65536)
+	return b
+}
+
+func acquireBuffer() *bytes.Buffer {
+	return bufferPool.Get().(*bytes.Buffer)
+}
+
+func releaseBuffer(b *bytes.Buffer) {
+	if b != nil {
+		b.Reset()
+		bufferPool.Put(b)
+	}
+}
diff --git a/vendor/github.com/segmentio/kafka-go/client.go b/vendor/github.com/segmentio/kafka-go/client.go
new file mode 100644
index 00000000000..d965040e871
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/client.go
@@ -0,0 +1,146 @@
+package kafka
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+const (
+	defaultCreateTopicsTimeout     = 2 * time.Second
+	defaultDeleteTopicsTimeout     = 2 * time.Second
+	defaultCreatePartitionsTimeout = 2 * time.Second
+	defaultProduceTimeout          = 500 * time.Millisecond
+	defaultMaxWait                 = 500 * time.Millisecond
+)
+
+// Client is a high-level API to interact with kafka brokers.
+//
+// All methods of the Client type accept a context as first argument, which may
+// be used to asynchronously cancel the requests.
+//
+// Clients are safe to use concurrently from multiple goroutines, as long as
+// their configuration is not changed after first use.
+type Client struct {
+	// Address of the kafka cluster (or specific broker) that the client will be
+	// sending requests to.
+	//
+	// This field is optional, the address may be provided in each request
+	// instead. The request address takes precedence if both were specified.
+	Addr net.Addr
+
+	// Time limit for requests sent by this client.
+	//
+	// If zero, no timeout is applied.
+	Timeout time.Duration
+
+	// A transport used to communicate with the kafka brokers.
+	//
+	// If nil, DefaultTransport is used.
+	Transport RoundTripper
+}
+
+// TopicAndGroup holds a consumer group and a topic; as these are both strings
+// we define a type for clarity when passing them to the Client as a function
+// argument.
+//
+// N.B. TopicAndGroup is currently experimental! Therefore, it is subject to
+// change, including breaking changes between MINOR and PATCH releases.
+//
+// DEPRECATED: this type will be removed in version 1.0, programs should
+// migrate to use kafka.(*Client).OffsetFetch instead.
+type TopicAndGroup struct {
+	Topic   string
+	GroupId string
+}
+
+// ConsumerOffsets returns a map[int]int64 of partition to committed offset for
+// a consumer group id and topic.
+//
+// DEPRECATED: this method will be removed in version 1.0, programs should
+// migrate to use kafka.(*Client).OffsetFetch instead.
+func (c *Client) ConsumerOffsets(ctx context.Context, tg TopicAndGroup) (map[int]int64, error) {
+	metadata, err := c.Metadata(ctx, &MetadataRequest{
+		Topics: []string{tg.Topic},
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("failed to get topic metadata: %w", err)
+	}
+
+	topic := metadata.Topics[0]
+	partitions := make([]int, len(topic.Partitions))
+
+	for i := range topic.Partitions {
+		partitions[i] = topic.Partitions[i].ID
+	}
+
+	offsets, err := c.OffsetFetch(ctx, &OffsetFetchRequest{
+		GroupID: tg.GroupId,
+		Topics: map[string][]int{
+			tg.Topic: partitions,
+		},
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("failed to get offsets: %w", err)
+	}
+
+	topicOffsets := offsets.Topics[topic.Name]
+	partitionOffsets := make(map[int]int64, len(topicOffsets))
+
+	for _, off := range topicOffsets {
+		partitionOffsets[off.Partition] = off.CommittedOffset
+	}
+
+	return partitionOffsets, nil
+}
+
+func (c *Client) roundTrip(ctx context.Context, addr net.Addr, msg protocol.Message) (protocol.Message, error) {
+	if c.Timeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, c.Timeout)
+		defer cancel()
+	}
+
+	if addr == nil {
+		if addr = c.Addr; addr == nil {
+			return nil, errors.New("no address was given for the kafka cluster in the request or on the client")
+		}
+	}
+
+	return c.transport().RoundTrip(ctx, addr, msg)
+}
+
+func (c *Client) transport() RoundTripper {
+	if c.Transport != nil {
+		return c.Transport
+	}
+	return DefaultTransport
+}
+
+func (c *Client) timeout(ctx context.Context, defaultTimeout time.Duration) time.Duration {
+	timeout := c.Timeout
+
+	if deadline, ok := ctx.Deadline(); ok {
+		if remain := time.Until(deadline); remain < timeout {
+			timeout = remain
+		}
+	}
+
+	if timeout > 0 {
+		// Half the timeout because it is communicated to kafka in multiple
+		// requests (e.g. Fetch, Produce, etc...), this adds buffer to account
+		// for network latency when waiting for the response from kafka.
+		return timeout / 2
+	}
+
+	return defaultTimeout
+}
+
+func (c *Client) timeoutMs(ctx context.Context, defaultTimeout time.Duration) int32 {
+	return milliseconds(c.timeout(ctx, defaultTimeout))
+}
diff --git a/vendor/github.com/segmentio/kafka-go/commit.go b/vendor/github.com/segmentio/kafka-go/commit.go
new file mode 100644
index 00000000000..e7740d58aaf
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/commit.go
@@ -0,0 +1,39 @@
+package kafka
+
+// A commit represents an instruction to publish an update of the last
+// offset read by a program for a topic and partition.
+type commit struct { + topic string + partition int + offset int64 +} + +// makeCommit builds a commit value from a message, the resulting commit takes +// its topic, partition, and offset from the message. +func makeCommit(msg Message) commit { + return commit{ + topic: msg.Topic, + partition: msg.Partition, + offset: msg.Offset + 1, + } +} + +// makeCommits generates a slice of commits from a list of messages, it extracts +// the topic, partition, and offset of each message and builds the corresponding +// commit slice. +func makeCommits(msgs ...Message) []commit { + commits := make([]commit, len(msgs)) + + for i, m := range msgs { + commits[i] = makeCommit(m) + } + + return commits +} + +// commitRequest is the data type exchanged between the CommitMessages method +// and internals of the reader's implementation. +type commitRequest struct { + commits []commit + errch chan<- error +} diff --git a/vendor/github.com/segmentio/kafka-go/compress/compress.go b/vendor/github.com/segmentio/kafka-go/compress/compress.go new file mode 100644 index 00000000000..054bf03d0cd --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/compress/compress.go @@ -0,0 +1,124 @@ +package compress + +import ( + "encoding" + "fmt" + "io" + "strconv" + "strings" + + "github.com/segmentio/kafka-go/compress/gzip" + "github.com/segmentio/kafka-go/compress/lz4" + "github.com/segmentio/kafka-go/compress/snappy" + "github.com/segmentio/kafka-go/compress/zstd" +) + +// Compression represents the compression applied to a record set. +type Compression int8 + +const ( + None Compression = 0 + Gzip Compression = 1 + Snappy Compression = 2 + Lz4 Compression = 3 + Zstd Compression = 4 +) + +func (c Compression) Codec() Codec { + if i := int(c); i >= 0 && i < len(Codecs) { + return Codecs[i] + } + return nil +} + +func (c Compression) String() string { + if codec := c.Codec(); codec != nil { + return codec.Name() + } + return "uncompressed" +} + +func (c Compression) MarshalText() ([]byte, error) { + return []byte(c.String()), nil +} + +func (c *Compression) UnmarshalText(b []byte) error { + switch string(b) { + case "none", "uncompressed": + *c = None + return nil + } + + for _, codec := range Codecs[None+1:] { + if codec.Name() == string(b) { + *c = Compression(codec.Code()) + return nil + } + } + + i, err := strconv.ParseInt(string(b), 10, 64) + if err == nil && i >= 0 && i < int64(len(Codecs)) { + *c = Compression(i) + return nil + } + + s := &strings.Builder{} + s.WriteString("none, uncompressed") + + for i, codec := range Codecs[None+1:] { + if i < (len(Codecs) - 1) { + s.WriteString(", ") + } else { + s.WriteString(", or ") + } + s.WriteString(codec.Name()) + } + + return fmt.Errorf("compression format must be one of %s, not %q", s, b) +} + +var ( + _ encoding.TextMarshaler = Compression(0) + _ encoding.TextUnmarshaler = (*Compression)(nil) +) + +// Codec represents a compression codec to encode and decode the messages. +// See : https://cwiki.apache.org/confluence/display/KAFKA/Compression +// +// A Codec must be safe for concurrent access by multiple go routines. +type Codec interface { + // Code returns the compression codec code + Code() int8 + + // Human-readable name for the codec. + Name() string + + // Constructs a new reader which decompresses data from r. + NewReader(r io.Reader) io.ReadCloser + + // Constructs a new writer which writes compressed data to w. 
+	NewWriter(w io.Writer) io.WriteCloser
+}
+
+var (
+	// The global gzip codec installed on the Codecs table.
+	GzipCodec gzip.Codec
+
+	// The global snappy codec installed on the Codecs table.
+	SnappyCodec snappy.Codec
+
+	// The global lz4 codec installed on the Codecs table.
+	Lz4Codec lz4.Codec
+
+	// The global zstd codec installed on the Codecs table.
+	ZstdCodec zstd.Codec
+
+	// The global table of compression codecs supported by the kafka protocol.
+	Codecs = [...]Codec{
+		None:   nil,
+		Gzip:   &GzipCodec,
+		Snappy: &SnappyCodec,
+		Lz4:    &Lz4Codec,
+		Zstd:   &ZstdCodec,
+	}
+)
diff --git a/vendor/github.com/segmentio/kafka-go/compress/gzip/gzip.go b/vendor/github.com/segmentio/kafka-go/compress/gzip/gzip.go
new file mode 100644
index 00000000000..ad5009c396a
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/compress/gzip/gzip.go
@@ -0,0 +1,123 @@
+package gzip
+
+import (
+	"io"
+	"sync"
+
+	"github.com/klauspost/compress/gzip"
+)
+
+var (
+	readerPool sync.Pool
+)
+
+// Codec is the implementation of a compress.Codec which supports creating
+// readers and writers for kafka messages compressed with gzip.
+type Codec struct {
+	// The compression level to configure on writers created by this codec.
+	// Acceptable values are defined in the standard gzip package.
+	//
+	// Defaults to gzip.DefaultCompression.
+	Level int
+
+	writerPool sync.Pool
+}
+
+// Code implements the compress.Codec interface.
+func (c *Codec) Code() int8 { return 1 }
+
+// Name implements the compress.Codec interface.
+func (c *Codec) Name() string { return "gzip" }
+
+// NewReader implements the compress.Codec interface.
+func (c *Codec) NewReader(r io.Reader) io.ReadCloser {
+	var err error
+	z, _ := readerPool.Get().(*gzip.Reader)
+	if z != nil {
+		err = z.Reset(r)
+	} else {
+		z, err = gzip.NewReader(r)
+	}
+	if err != nil {
+		if z != nil {
+			readerPool.Put(z)
+		}
+		return &errorReader{err: err}
+	}
+	return &reader{Reader: z}
+}
+
+// NewWriter implements the compress.Codec interface.
+func (c *Codec) NewWriter(w io.Writer) io.WriteCloser {
+	x := c.writerPool.Get()
+	z, _ := x.(*gzip.Writer)
+	if z == nil {
+		x, err := gzip.NewWriterLevel(w, c.level())
+		if err != nil {
+			return &errorWriter{err: err}
+		}
+		z = x
+	} else {
+		z.Reset(w)
+	}
+	return &writer{codec: c, Writer: z}
+}
+
+func (c *Codec) level() int {
+	if c.Level != 0 {
+		return c.Level
+	}
+	return gzip.DefaultCompression
+}
+
+type reader struct{ *gzip.Reader }
+
+func (r *reader) Close() (err error) {
+	if z := r.Reader; z != nil {
+		r.Reader = nil
+		err = z.Close()
+		// Pass it an empty reader, which is a zero-size value implementing the
+		// flate.Reader interface to avoid the construction of a bufio.Reader in
+		// the call to Reset.
+		//
+		// Note: we could also not reset the reader at all, but that would cause
+		// the underlying reader to be retained until the gzip.Reader is freed,
+		// which may not be desirable.
+ z.Reset(emptyReader{}) + readerPool.Put(z) + } + return +} + +type writer struct { + codec *Codec + *gzip.Writer +} + +func (w *writer) Close() (err error) { + if z := w.Writer; z != nil { + w.Writer = nil + err = z.Close() + z.Reset(nil) + w.codec.writerPool.Put(z) + } + return +} + +type emptyReader struct{} + +func (emptyReader) ReadByte() (byte, error) { return 0, io.EOF } + +func (emptyReader) Read([]byte) (int, error) { return 0, io.EOF } + +type errorReader struct{ err error } + +func (r *errorReader) Close() error { return r.err } + +func (r *errorReader) Read([]byte) (int, error) { return 0, r.err } + +type errorWriter struct{ err error } + +func (w *errorWriter) Close() error { return w.err } + +func (w *errorWriter) Write([]byte) (int, error) { return 0, w.err } diff --git a/vendor/github.com/segmentio/kafka-go/compress/lz4/lz4.go b/vendor/github.com/segmentio/kafka-go/compress/lz4/lz4.go new file mode 100644 index 00000000000..1aa8289b89e --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/compress/lz4/lz4.go @@ -0,0 +1,68 @@ +package lz4 + +import ( + "io" + "sync" + + "github.com/pierrec/lz4/v4" +) + +var ( + readerPool sync.Pool + writerPool sync.Pool +) + +// Codec is the implementation of a compress.Codec which supports creating +// readers and writers for kafka messages compressed with lz4. +type Codec struct{} + +// Code implements the compress.Codec interface. +func (c *Codec) Code() int8 { return 3 } + +// Name implements the compress.Codec interface. +func (c *Codec) Name() string { return "lz4" } + +// NewReader implements the compress.Codec interface. +func (c *Codec) NewReader(r io.Reader) io.ReadCloser { + z, _ := readerPool.Get().(*lz4.Reader) + if z != nil { + z.Reset(r) + } else { + z = lz4.NewReader(r) + } + return &reader{Reader: z} +} + +// NewWriter implements the compress.Codec interface. +func (c *Codec) NewWriter(w io.Writer) io.WriteCloser { + z, _ := writerPool.Get().(*lz4.Writer) + if z != nil { + z.Reset(w) + } else { + z = lz4.NewWriter(w) + } + return &writer{Writer: z} +} + +type reader struct{ *lz4.Reader } + +func (r *reader) Close() (err error) { + if z := r.Reader; z != nil { + r.Reader = nil + z.Reset(nil) + readerPool.Put(z) + } + return +} + +type writer struct{ *lz4.Writer } + +func (w *writer) Close() (err error) { + if z := w.Writer; z != nil { + w.Writer = nil + err = z.Close() + z.Reset(nil) + writerPool.Put(z) + } + return +} diff --git a/vendor/github.com/segmentio/kafka-go/compress/snappy/snappy.go b/vendor/github.com/segmentio/kafka-go/compress/snappy/snappy.go new file mode 100644 index 00000000000..5bc6194f1d3 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/compress/snappy/snappy.go @@ -0,0 +1,110 @@ +package snappy + +import ( + "io" + "sync" + + "github.com/klauspost/compress/s2" + "github.com/klauspost/compress/snappy" +) + +// Framing is an enumeration type used to enable or disable xerial framing of +// snappy messages. +type Framing int + +const ( + Framed Framing = iota + Unframed +) + +// Compression level. +type Compression int + +const ( + DefaultCompression Compression = iota + FasterCompression + BetterCompression + BestCompression +) + +var ( + readerPool sync.Pool + writerPool sync.Pool +) + +// Codec is the implementation of a compress.Codec which supports creating +// readers and writers for kafka messages compressed with snappy. +type Codec struct { + // An optional framing to apply to snappy compression. 
+	//
+	// Defaults to Framed.
+	Framing Framing
+
+	// Compression level.
+	Compression Compression
+}
+
+// Code implements the compress.Codec interface.
+func (c *Codec) Code() int8 { return 2 }
+
+// Name implements the compress.Codec interface.
+func (c *Codec) Name() string { return "snappy" }
+
+// NewReader implements the compress.Codec interface.
+func (c *Codec) NewReader(r io.Reader) io.ReadCloser {
+	x, _ := readerPool.Get().(*xerialReader)
+	if x != nil {
+		x.Reset(r)
+	} else {
+		x = &xerialReader{
+			reader: r,
+			decode: snappy.Decode,
+		}
+	}
+	return &reader{xerialReader: x}
+}
+
+// NewWriter implements the compress.Codec interface.
+func (c *Codec) NewWriter(w io.Writer) io.WriteCloser {
+	x, _ := writerPool.Get().(*xerialWriter)
+	if x != nil {
+		x.Reset(w)
+	} else {
+		x = &xerialWriter{writer: w}
+	}
+	x.framed = c.Framing == Framed
+	switch c.Compression {
+	case FasterCompression:
+		x.encode = s2.EncodeSnappy
+	case BetterCompression:
+		x.encode = s2.EncodeSnappyBetter
+	case BestCompression:
+		x.encode = s2.EncodeSnappyBest
+	default:
+		x.encode = snappy.Encode // aka. s2.EncodeSnappyBetter
+	}
+	return &writer{xerialWriter: x}
+}
+
+type reader struct{ *xerialReader }
+
+func (r *reader) Close() (err error) {
+	if x := r.xerialReader; x != nil {
+		r.xerialReader = nil
+		x.Reset(nil)
+		readerPool.Put(x)
+	}
+	return
+}
+
+type writer struct{ *xerialWriter }
+
+func (w *writer) Close() (err error) {
+	if x := w.xerialWriter; x != nil {
+		w.xerialWriter = nil
+		err = x.Flush()
+		x.Reset(nil)
+		writerPool.Put(x)
+	}
+	return
+}
diff --git a/vendor/github.com/segmentio/kafka-go/compress/snappy/xerial.go b/vendor/github.com/segmentio/kafka-go/compress/snappy/xerial.go
new file mode 100644
index 00000000000..e2725af9c35
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/compress/snappy/xerial.go
@@ -0,0 +1,330 @@
+package snappy
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"io"
+
+	"github.com/klauspost/compress/snappy"
+)
+
+const defaultBufferSize = 32 * 1024
+
+// An implementation of io.Reader which consumes a stream of xerial-framed
+// snappy-encoded data. The framing is optional; if no framing is detected,
+// the reader will simply forward the bytes from its underlying stream.
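+//
+// The framing, when present, is the one produced by the xerial snappy
+// library: an 8-byte magic header (0x82 "SNAPPY" 0x00) followed by 8 bytes
+// of version information, then a sequence of blocks, each prefixed with its
+// compressed size encoded as a big-endian uint32 (see xerialHeader,
+// xerialVersionInfo and writeXerialFrame at the end of this file).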
+type xerialReader struct { + reader io.Reader + header [16]byte + input []byte + output []byte + offset int64 + nbytes int64 + decode func([]byte, []byte) ([]byte, error) +} + +func (x *xerialReader) Reset(r io.Reader) { + x.reader = r + x.input = x.input[:0] + x.output = x.output[:0] + x.header = [16]byte{} + x.offset = 0 + x.nbytes = 0 +} + +func (x *xerialReader) Read(b []byte) (int, error) { + for { + if x.offset < int64(len(x.output)) { + n := copy(b, x.output[x.offset:]) + x.offset += int64(n) + return n, nil + } + + n, err := x.readChunk(b) + if err != nil { + return 0, err + } + if n > 0 { + return n, nil + } + } +} + +func (x *xerialReader) WriteTo(w io.Writer) (int64, error) { + wn := int64(0) + + for { + for x.offset < int64(len(x.output)) { + n, err := w.Write(x.output[x.offset:]) + wn += int64(n) + x.offset += int64(n) + if err != nil { + return wn, err + } + } + + if _, err := x.readChunk(nil); err != nil { + if errors.Is(err, io.EOF) { + err = nil + } + return wn, err + } + } +} + +func (x *xerialReader) readChunk(dst []byte) (int, error) { + x.output = x.output[:0] + x.offset = 0 + prefix := 0 + + if x.nbytes == 0 { + n, err := x.readFull(x.header[:]) + if err != nil && n == 0 { + return 0, err + } + prefix = n + } + + if isXerialHeader(x.header[:]) { + if cap(x.input) < 4 { + x.input = make([]byte, 4, defaultBufferSize) + } else { + x.input = x.input[:4] + } + + _, err := x.readFull(x.input) + if err != nil { + return 0, err + } + + frame := int(binary.BigEndian.Uint32(x.input)) + if cap(x.input) < frame { + x.input = make([]byte, frame, align(frame, defaultBufferSize)) + } else { + x.input = x.input[:frame] + } + + if _, err := x.readFull(x.input); err != nil { + return 0, err + } + } else { + if cap(x.input) == 0 { + x.input = make([]byte, 0, defaultBufferSize) + } else { + x.input = x.input[:0] + } + + if prefix > 0 { + x.input = append(x.input, x.header[:prefix]...) + } + + for { + if len(x.input) == cap(x.input) { + b := make([]byte, len(x.input), 2*cap(x.input)) + copy(b, x.input) + x.input = b + } + + n, err := x.read(x.input[len(x.input):cap(x.input)]) + x.input = x.input[:len(x.input)+n] + if err != nil { + if errors.Is(err, io.EOF) && len(x.input) > 0 { + break + } + return 0, err + } + } + } + + var n int + var err error + + if x.decode == nil { + x.output, x.input, err = x.input, x.output, nil + } else if n, err = snappy.DecodedLen(x.input); n <= len(dst) && err == nil { + // If the output buffer is large enough to hold the decode value, + // write it there directly instead of using the intermediary output + // buffer. + _, err = x.decode(dst, x.input) + } else { + var b []byte + n = 0 + b, err = x.decode(x.output[:cap(x.output)], x.input) + if err == nil { + x.output = b + } + } + + return n, err +} + +func (x *xerialReader) read(b []byte) (int, error) { + n, err := x.reader.Read(b) + x.nbytes += int64(n) + return n, err +} + +func (x *xerialReader) readFull(b []byte) (int, error) { + n, err := io.ReadFull(x.reader, b) + x.nbytes += int64(n) + return n, err +} + +// An implementation of a xerial-framed snappy-encoded output stream. +// Each Write made to the writer is framed with a xerial header. 
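+//
+// When framing is enabled, the 16-byte xerial header is written once, before
+// the first block, and each flushed block is prefixed with its compressed
+// size encoded as a big-endian uint32 (see Flush below).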
+type xerialWriter struct { + writer io.Writer + header [16]byte + input []byte + output []byte + nbytes int64 + framed bool + encode func([]byte, []byte) []byte +} + +func (x *xerialWriter) Reset(w io.Writer) { + x.writer = w + x.input = x.input[:0] + x.output = x.output[:0] + x.nbytes = 0 +} + +func (x *xerialWriter) ReadFrom(r io.Reader) (int64, error) { + wn := int64(0) + + if cap(x.input) == 0 { + x.input = make([]byte, 0, defaultBufferSize) + } + + for { + if x.full() { + x.grow() + } + + n, err := r.Read(x.input[len(x.input):cap(x.input)]) + wn += int64(n) + x.input = x.input[:len(x.input)+n] + + if x.fullEnough() { + if err := x.Flush(); err != nil { + return wn, err + } + } + + if err != nil { + if errors.Is(err, io.EOF) { + err = nil + } + return wn, err + } + } +} + +func (x *xerialWriter) Write(b []byte) (int, error) { + wn := 0 + + if cap(x.input) == 0 { + x.input = make([]byte, 0, defaultBufferSize) + } + + for len(b) > 0 { + if x.full() { + x.grow() + } + + n := copy(x.input[len(x.input):cap(x.input)], b) + b = b[n:] + wn += n + x.input = x.input[:len(x.input)+n] + + if x.fullEnough() { + if err := x.Flush(); err != nil { + return wn, err + } + } + } + + return wn, nil +} + +func (x *xerialWriter) Flush() error { + if len(x.input) == 0 { + return nil + } + + var b []byte + if x.encode == nil { + b = x.input + } else { + x.output = x.encode(x.output[:cap(x.output)], x.input) + b = x.output + } + + x.input = x.input[:0] + x.output = x.output[:0] + + if x.framed && x.nbytes == 0 { + writeXerialHeader(x.header[:]) + _, err := x.write(x.header[:]) + if err != nil { + return err + } + } + + if x.framed { + writeXerialFrame(x.header[:4], len(b)) + _, err := x.write(x.header[:4]) + if err != nil { + return err + } + } + + _, err := x.write(b) + return err +} + +func (x *xerialWriter) write(b []byte) (int, error) { + n, err := x.writer.Write(b) + x.nbytes += int64(n) + return n, err +} + +func (x *xerialWriter) full() bool { + return len(x.input) == cap(x.input) +} + +func (x *xerialWriter) fullEnough() bool { + return x.framed && (cap(x.input)-len(x.input)) < 1024 +} + +func (x *xerialWriter) grow() { + tmp := make([]byte, len(x.input), 2*cap(x.input)) + copy(tmp, x.input) + x.input = tmp +} + +func align(n, a int) int { + if (n % a) == 0 { + return n + } + return ((n / a) + 1) * a +} + +var ( + xerialHeader = [...]byte{130, 83, 78, 65, 80, 80, 89, 0} + xerialVersionInfo = [...]byte{0, 0, 0, 1, 0, 0, 0, 1} +) + +func isXerialHeader(src []byte) bool { + return len(src) >= 16 && bytes.Equal(src[:8], xerialHeader[:]) +} + +func writeXerialHeader(b []byte) { + copy(b[:8], xerialHeader[:]) + copy(b[8:], xerialVersionInfo[:]) +} + +func writeXerialFrame(b []byte, n int) { + binary.BigEndian.PutUint32(b, uint32(n)) +} diff --git a/vendor/github.com/segmentio/kafka-go/compress/zstd/zstd.go b/vendor/github.com/segmentio/kafka-go/compress/zstd/zstd.go new file mode 100644 index 00000000000..1cc5e849045 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/compress/zstd/zstd.go @@ -0,0 +1,168 @@ +// Package zstd implements Zstandard compression. +package zstd + +import ( + "io" + "sync" + + "github.com/klauspost/compress/zstd" +) + +// Codec is the implementation of a compress.Codec which supports creating +// readers and writers for kafka messages compressed with zstd. +type Codec struct { + // The compression level configured on writers created by the codec. + // + // Default to 3. 
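+	//
+	// Values are interpreted as standard zstd compression levels and are
+	// mapped to the encoder options via zstd.EncoderLevelFromZstd (see
+	// zstdLevel below).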
+	Level int
+
+	encoderPool sync.Pool // *encoder
+}
+
+// Code implements the compress.Codec interface.
+func (c *Codec) Code() int8 { return 4 }
+
+// Name implements the compress.Codec interface.
+func (c *Codec) Name() string { return "zstd" }
+
+// NewReader implements the compress.Codec interface.
+func (c *Codec) NewReader(r io.Reader) io.ReadCloser {
+	p := new(reader)
+	if p.dec, _ = decoderPool.Get().(*zstd.Decoder); p.dec != nil {
+		p.dec.Reset(r)
+	} else {
+		z, err := zstd.NewReader(r,
+			zstd.WithDecoderConcurrency(1),
+		)
+		if err != nil {
+			p.err = err
+		} else {
+			p.dec = z
+		}
+	}
+	return p
+}
+
+func (c *Codec) level() int {
+	if c.Level != 0 {
+		return c.Level
+	}
+	return 3
+}
+
+func (c *Codec) zstdLevel() zstd.EncoderLevel {
+	return zstd.EncoderLevelFromZstd(c.level())
+}
+
+var decoderPool sync.Pool // *zstd.Decoder
+
+type reader struct {
+	dec *zstd.Decoder
+	err error
+}
+
+// Close implements the io.Closer interface.
+func (r *reader) Close() error {
+	if r.dec != nil {
+		r.dec.Reset(devNull{}) // don't retain the underlying reader
+		decoderPool.Put(r.dec)
+		r.dec = nil
+		r.err = io.ErrClosedPipe
+	}
+	return nil
+}
+
+// Read implements the io.Reader interface.
+func (r *reader) Read(p []byte) (int, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	if r.dec == nil {
+		return 0, io.EOF
+	}
+	return r.dec.Read(p)
+}
+
+// WriteTo implements the io.WriterTo interface.
+func (r *reader) WriteTo(w io.Writer) (int64, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	if r.dec == nil {
+		return 0, io.ErrClosedPipe
+	}
+	return r.dec.WriteTo(w)
+}
+
+// NewWriter implements the compress.Codec interface.
+func (c *Codec) NewWriter(w io.Writer) io.WriteCloser {
+	p := new(writer)
+	if enc, _ := c.encoderPool.Get().(*zstd.Encoder); enc == nil {
+		z, err := zstd.NewWriter(w,
+			zstd.WithEncoderLevel(c.zstdLevel()),
+			zstd.WithEncoderConcurrency(1),
+			zstd.WithZeroFrames(true),
+		)
+		if err != nil {
+			p.err = err
+		} else {
+			p.enc = z
+		}
+	} else {
+		p.enc = enc
+		p.enc.Reset(w)
+	}
+	p.c = c
+	return p
+}
+
+type writer struct {
+	c   *Codec
+	enc *zstd.Encoder
+	err error
+}
+
+// Close implements the io.Closer interface.
+func (w *writer) Close() error {
+	if w.enc != nil {
+		// Close needs to be called to write the end of stream marker and flush
+		// the buffers. The zstd package documents that the encoder is re-usable
+		// after being closed.
+		err := w.enc.Close()
+		if err != nil {
+			w.err = err
+		}
+		w.enc.Reset(devNull{}) // don't retain the underlying writer
+		w.c.encoderPool.Put(w.enc)
+		w.enc = nil
+		return err
+	}
+	return w.err
+}
+
+// Write implements the io.Writer interface.
+func (w *writer) Write(p []byte) (int, error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	if w.enc == nil {
+		return 0, io.ErrClosedPipe
+	}
+	return w.enc.Write(p)
+}
+
+// ReadFrom implements the io.ReaderFrom interface.
+func (w *writer) ReadFrom(r io.Reader) (int64, error) { + if w.err != nil { + return 0, w.err + } + if w.enc == nil { + return 0, io.ErrClosedPipe + } + return w.enc.ReadFrom(r) +} + +type devNull struct{} + +func (devNull) Read([]byte) (int, error) { return 0, io.EOF } +func (devNull) Write([]byte) (int, error) { return 0, nil } diff --git a/vendor/github.com/segmentio/kafka-go/compression.go b/vendor/github.com/segmentio/kafka-go/compression.go new file mode 100644 index 00000000000..411fe87a152 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/compression.go @@ -0,0 +1,31 @@ +package kafka + +import ( + "errors" + + "github.com/segmentio/kafka-go/compress" +) + +type Compression = compress.Compression + +const ( + Gzip Compression = compress.Gzip + Snappy Compression = compress.Snappy + Lz4 Compression = compress.Lz4 + Zstd Compression = compress.Zstd +) + +type CompressionCodec = compress.Codec + +var ( + errUnknownCodec = errors.New("the compression code is invalid or its codec has not been imported") +) + +// resolveCodec looks up a codec by Code(). +func resolveCodec(code int8) (CompressionCodec, error) { + codec := compress.Compression(code).Codec() + if codec == nil { + return nil, errUnknownCodec + } + return codec, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/conn.go b/vendor/github.com/segmentio/kafka-go/conn.go new file mode 100644 index 00000000000..2b51afbd5f7 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/conn.go @@ -0,0 +1,1645 @@ +package kafka + +import ( + "bufio" + "errors" + "fmt" + "io" + "math" + "net" + "os" + "path/filepath" + "sync" + "sync/atomic" + "time" +) + +var ( + errInvalidWriteTopic = errors.New("writes must NOT set Topic on kafka.Message") + errInvalidWritePartition = errors.New("writes must NOT set Partition on kafka.Message") +) + +// Conn represents a connection to a kafka broker. +// +// Instances of Conn are safe to use concurrently from multiple goroutines. +type Conn struct { + // base network connection + conn net.Conn + + // number of inflight requests on the connection. + inflight int32 + + // offset management (synchronized on the mutex field) + mutex sync.Mutex + offset int64 + + // read buffer (synchronized on rlock) + rlock sync.Mutex + rbuf bufio.Reader + + // write buffer (synchronized on wlock) + wlock sync.Mutex + wbuf bufio.Writer + wb writeBuffer + + // deadline management + wdeadline connDeadline + rdeadline connDeadline + + // immutable values of the connection object + clientID string + topic string + partition int32 + fetchMaxBytes int32 + fetchMinSize int32 + broker int32 + rack string + + // correlation ID generator (synchronized on wlock) + correlationID int32 + + // number of replica acks required when publishing to a partition + requiredAcks int32 + + // lazily loaded API versions used by this connection + apiVersions atomic.Value // apiVersionMap + + transactionalID *string +} + +type apiVersionMap map[apiKey]ApiVersion + +func (v apiVersionMap) negotiate(key apiKey, sortedSupportedVersions ...apiVersion) apiVersion { + x := v[key] + + for i := len(sortedSupportedVersions) - 1; i >= 0; i-- { + s := sortedSupportedVersions[i] + + if apiVersion(x.MaxVersion) >= s { + return s + } + } + + return -1 +} + +// ConnConfig is a configuration object used to create new instances of Conn. 
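+//
+// A minimal sketch of how it is meant to be used with NewConnWith (the
+// broker address and topic below are illustrative assumptions):
+//
+//	nc, _ := net.Dial("tcp", "localhost:9092")
+//	conn := NewConnWith(nc, ConnConfig{
+//		Topic:     "my-topic",
+//		Partition: 0,
+//	})
+//	defer conn.Close()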
+type ConnConfig struct {
+	ClientID  string
+	Topic     string
+	Partition int
+	Broker    int
+	Rack      string
+
+	// The transactional id to use for transactional delivery. Idempotent
+	// delivery should be enabled if a transactional id is configured.
+	// For more details look at the transactional.id description here: http://kafka.apache.org/documentation.html#producerconfigs
+	// An empty string means that this connection can't be transactional.
+	TransactionalID string
+}
+
+// ReadBatchConfig is a configuration object used for reading batches of messages.
+type ReadBatchConfig struct {
+	// MinBytes indicates to the broker the minimum batch size that the consumer
+	// will accept. Setting a high minimum when consuming from a low-volume topic
+	// may result in delayed delivery when the broker does not have enough data to
+	// satisfy the defined minimum.
+	MinBytes int
+
+	// MaxBytes indicates to the broker the maximum batch size that the consumer
+	// will accept. The broker will truncate a message to satisfy this maximum, so
+	// choose a value that is high enough for your largest message size.
+	MaxBytes int
+
+	// IsolationLevel controls the visibility of transactional records.
+	// ReadUncommitted makes all records visible. With ReadCommitted only
+	// non-transactional and committed records are visible.
+	IsolationLevel IsolationLevel
+
+	// MaxWait is the maximum amount of time the broker will wait to hit the
+	// min/max byte targets before answering the fetch request. This setting
+	// is independent of any network-level timeouts or deadlines.
+	//
+	// For backward compatibility, when this field is left zero, kafka-go will
+	// infer the max wait from the connection's read deadline.
+	MaxWait time.Duration
+}
+
+type IsolationLevel int8
+
+const (
+	ReadUncommitted IsolationLevel = 0
+	ReadCommitted   IsolationLevel = 1
+)
+
+var (
+	// DefaultClientID is the default value used as ClientID of kafka
+	// connections.
+	DefaultClientID string
+)
+
+func init() {
+	progname := filepath.Base(os.Args[0])
+	hostname, _ := os.Hostname()
+	DefaultClientID = fmt.Sprintf("%s@%s (github.com/segmentio/kafka-go)", progname, hostname)
+}
+
+// NewConn returns a new kafka connection for the given topic and partition.
+func NewConn(conn net.Conn, topic string, partition int) *Conn {
+	return NewConnWith(conn, ConnConfig{
+		Topic:     topic,
+		Partition: partition,
+	})
+}
+
+func emptyToNullable(transactionalID string) (result *string) {
+	if transactionalID != "" {
+		result = &transactionalID
+	}
+	return result
+}
+
+// NewConnWith returns a new kafka connection configured with config.
+// The offset is initialized to FirstOffset.
+func NewConnWith(conn net.Conn, config ConnConfig) *Conn {
+	if len(config.ClientID) == 0 {
+		config.ClientID = DefaultClientID
+	}
+
+	if config.Partition < 0 || config.Partition > math.MaxInt32 {
+		panic(fmt.Sprintf("invalid partition number: %d", config.Partition))
+	}
+
+	c := &Conn{
+		conn:            conn,
+		rbuf:            *bufio.NewReader(conn),
+		wbuf:            *bufio.NewWriter(conn),
+		clientID:        config.ClientID,
+		topic:           config.Topic,
+		partition:       int32(config.Partition),
+		broker:          int32(config.Broker),
+		rack:            config.Rack,
+		offset:          FirstOffset,
+		requiredAcks:    -1,
+		transactionalID: emptyToNullable(config.TransactionalID),
+	}
+
+	c.wb.w = &c.wbuf
+
+	// The fetch request needs to ask for a MaxBytes value that is at least
+	// enough to load the control data of the response. To avoid having to
+	// recompute it on every read, it is cached here in the Conn value.
+	c.fetchMinSize = (fetchResponseV2{
+		Topics: []fetchResponseTopicV2{{
+			TopicName: config.Topic,
+			Partitions: []fetchResponsePartitionV2{{
+				Partition:  int32(config.Partition),
+				MessageSet: messageSet{{}},
+			}},
+		}},
+	}).size()
+	c.fetchMaxBytes = math.MaxInt32 - c.fetchMinSize
+	return c
+}
+
+func (c *Conn) negotiateVersion(key apiKey, sortedSupportedVersions ...apiVersion) (apiVersion, error) {
+	v, err := c.loadVersions()
+	if err != nil {
+		return -1, err
+	}
+	a := v.negotiate(key, sortedSupportedVersions...)
+	if a < 0 {
+		return -1, fmt.Errorf("no matching versions were found between the client and the broker for API key %d", key)
+	}
+	return a, nil
+}
+
+func (c *Conn) loadVersions() (apiVersionMap, error) {
+	v, _ := c.apiVersions.Load().(apiVersionMap)
+	if v != nil {
+		return v, nil
+	}
+
+	brokerVersions, err := c.ApiVersions()
+	if err != nil {
+		return nil, err
+	}
+
+	v = make(apiVersionMap, len(brokerVersions))
+
+	for _, a := range brokerVersions {
+		v[apiKey(a.ApiKey)] = a
+	}
+
+	c.apiVersions.Store(v)
+	return v, nil
+}
+
+// Broker returns a Broker value representing the kafka broker that this
+// connection was established to.
+func (c *Conn) Broker() Broker {
+	addr := c.conn.RemoteAddr()
+	host, port, _ := splitHostPortNumber(addr.String())
+	return Broker{
+		Host: host,
+		Port: port,
+		ID:   int(c.broker),
+		Rack: c.rack,
+	}
+}
+
+// Controller requests the current controller from kafka and returns it as a
+// Broker value.
+func (c *Conn) Controller() (broker Broker, err error) {
+	err = c.readOperation(
+		func(deadline time.Time, id int32) error {
+			return c.writeRequest(metadata, v1, id, topicMetadataRequestV1([]string{}))
+		},
+		func(deadline time.Time, size int) error {
+			var res metadataResponseV1
+
+			if err := c.readResponse(size, &res); err != nil {
+				return err
+			}
+			for _, brokerMeta := range res.Brokers {
+				if brokerMeta.NodeID == res.ControllerID {
+					broker = Broker{
+						ID:   int(brokerMeta.NodeID),
+						Port: int(brokerMeta.Port),
+						Host: brokerMeta.Host,
+						Rack: brokerMeta.Rack,
+					}
+					break
+				}
+			}
+			return nil
+		},
+	)
+	return broker, err
+}
+
+// Brokers retrieves the broker list from the Kafka metadata.
+func (c *Conn) Brokers() ([]Broker, error) {
+	var brokers []Broker
+	err := c.readOperation(
+		func(deadline time.Time, id int32) error {
+			return c.writeRequest(metadata, v1, id, topicMetadataRequestV1([]string{}))
+		},
+		func(deadline time.Time, size int) error {
+			var res metadataResponseV1
+
+			if err := c.readResponse(size, &res); err != nil {
+				return err
+			}
+
+			brokers = make([]Broker, len(res.Brokers))
+			for i, brokerMeta := range res.Brokers {
+				brokers[i] = Broker{
+					ID:   int(brokerMeta.NodeID),
+					Port: int(brokerMeta.Port),
+					Host: brokerMeta.Host,
+					Rack: brokerMeta.Rack,
+				}
+			}
+			return nil
+		},
+	)
+	return brokers, err
+}
+
+// DeleteTopics deletes the specified topics.
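+//
+// For example (the topic names are illustrative):
+//
+//	err := conn.DeleteTopics("events", "audit")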
+func (c *Conn) DeleteTopics(topics ...string) error { + _, err := c.deleteTopics(deleteTopicsRequestV0{ + Topics: topics, + }) + return err +} + +// findCoordinator finds the coordinator for the specified group or transaction +// +// See http://kafka.apache.org/protocol.html#The_Messages_FindCoordinator +func (c *Conn) findCoordinator(request findCoordinatorRequestV0) (findCoordinatorResponseV0, error) { + var response findCoordinatorResponseV0 + + err := c.readOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(findCoordinator, v0, id, request) + + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return findCoordinatorResponseV0{}, err + } + if response.ErrorCode != 0 { + return findCoordinatorResponseV0{}, Error(response.ErrorCode) + } + + return response, nil +} + +// heartbeat sends a heartbeat message required by consumer groups +// +// See http://kafka.apache.org/protocol.html#The_Messages_Heartbeat +func (c *Conn) heartbeat(request heartbeatRequestV0) (heartbeatResponseV0, error) { + var response heartbeatResponseV0 + + err := c.writeOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(heartbeat, v0, id, request) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return heartbeatResponseV0{}, err + } + if response.ErrorCode != 0 { + return heartbeatResponseV0{}, Error(response.ErrorCode) + } + + return response, nil +} + +// joinGroup attempts to join a consumer group +// +// See http://kafka.apache.org/protocol.html#The_Messages_JoinGroup +func (c *Conn) joinGroup(request joinGroupRequestV1) (joinGroupResponseV1, error) { + var response joinGroupResponseV1 + + err := c.writeOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(joinGroup, v1, id, request) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return joinGroupResponseV1{}, err + } + if response.ErrorCode != 0 { + return joinGroupResponseV1{}, Error(response.ErrorCode) + } + + return response, nil +} + +// leaveGroup leaves the consumer from the consumer group +// +// See http://kafka.apache.org/protocol.html#The_Messages_LeaveGroup +func (c *Conn) leaveGroup(request leaveGroupRequestV0) (leaveGroupResponseV0, error) { + var response leaveGroupResponseV0 + + err := c.writeOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(leaveGroup, v0, id, request) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return leaveGroupResponseV0{}, err + } + if response.ErrorCode != 0 { + return leaveGroupResponseV0{}, Error(response.ErrorCode) + } + + return response, nil +} + +// listGroups lists all the consumer groups +// +// See http://kafka.apache.org/protocol.html#The_Messages_ListGroups +func (c *Conn) listGroups(request listGroupsRequestV1) (listGroupsResponseV1, error) { + var response listGroupsResponseV1 + + err := c.readOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(listGroups, v1, id, request) + }, + func(deadline time.Time, size int) error { + 
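+			// The response frame must be fully consumed; expectZeroSize
+			// reports an error if any bytes remain after parsing.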
return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return listGroupsResponseV1{}, err + } + if response.ErrorCode != 0 { + return listGroupsResponseV1{}, Error(response.ErrorCode) + } + + return response, nil +} + +// offsetCommit commits the specified topic partition offsets +// +// See http://kafka.apache.org/protocol.html#The_Messages_OffsetCommit +func (c *Conn) offsetCommit(request offsetCommitRequestV2) (offsetCommitResponseV2, error) { + var response offsetCommitResponseV2 + + err := c.writeOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(offsetCommit, v2, id, request) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return offsetCommitResponseV2{}, err + } + for _, r := range response.Responses { + for _, pr := range r.PartitionResponses { + if pr.ErrorCode != 0 { + return offsetCommitResponseV2{}, Error(pr.ErrorCode) + } + } + } + + return response, nil +} + +// offsetFetch fetches the offsets for the specified topic partitions. +// -1 indicates that there is no offset saved for the partition. +// +// See http://kafka.apache.org/protocol.html#The_Messages_OffsetFetch +func (c *Conn) offsetFetch(request offsetFetchRequestV1) (offsetFetchResponseV1, error) { + var response offsetFetchResponseV1 + + err := c.readOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(offsetFetch, v1, id, request) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return offsetFetchResponseV1{}, err + } + for _, r := range response.Responses { + for _, pr := range r.PartitionResponses { + if pr.ErrorCode != 0 { + return offsetFetchResponseV1{}, Error(pr.ErrorCode) + } + } + } + + return response, nil +} + +// syncGroup completes the handshake to join a consumer group +// +// See http://kafka.apache.org/protocol.html#The_Messages_SyncGroup +func (c *Conn) syncGroup(request syncGroupRequestV0) (syncGroupResponseV0, error) { + var response syncGroupResponseV0 + + err := c.readOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(syncGroup, v0, id, request) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return syncGroupResponseV0{}, err + } + if response.ErrorCode != 0 { + return syncGroupResponseV0{}, Error(response.ErrorCode) + } + + return response, nil +} + +// Close closes the kafka connection. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. +func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// SetDeadline sets the read and write deadlines associated with the connection. +// It is equivalent to calling both SetReadDeadline and SetWriteDeadline. +// +// A deadline is an absolute time after which I/O operations fail with a timeout +// (see type Error) instead of blocking. The deadline applies to all future and +// pending I/O, not just the immediately following call to Read or Write. 
After +// a deadline has been exceeded, the connection may be closed if it was found to +// be in an unrecoverable state. +// +// A zero value for t means I/O operations will not time out. +func (c *Conn) SetDeadline(t time.Time) error { + c.rdeadline.setDeadline(t) + c.wdeadline.setDeadline(t) + return nil +} + +// SetReadDeadline sets the deadline for future Read calls and any +// currently-blocked Read call. +// A zero value for t means Read will not time out. +func (c *Conn) SetReadDeadline(t time.Time) error { + c.rdeadline.setDeadline(t) + return nil +} + +// SetWriteDeadline sets the deadline for future Write calls and any +// currently-blocked Write call. +// Even if write times out, it may return n > 0, indicating that some of the +// data was successfully written. +// A zero value for t means Write will not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.wdeadline.setDeadline(t) + return nil +} + +// Offset returns the current offset of the connection as pair of integers, +// where the first one is an offset value and the second one indicates how +// to interpret it. +// +// See Seek for more details about the offset and whence values. +func (c *Conn) Offset() (offset int64, whence int) { + c.mutex.Lock() + offset = c.offset + c.mutex.Unlock() + + switch offset { + case FirstOffset: + offset = 0 + whence = SeekStart + case LastOffset: + offset = 0 + whence = SeekEnd + default: + whence = SeekAbsolute + } + return +} + +const ( + SeekStart = 0 // Seek relative to the first offset available in the partition. + SeekAbsolute = 1 // Seek to an absolute offset. + SeekEnd = 2 // Seek relative to the last offset available in the partition. + SeekCurrent = 3 // Seek relative to the current offset. + + // This flag may be combined to any of the SeekAbsolute and SeekCurrent + // constants to skip the bound check that the connection would do otherwise. + // Programs can use this flag to avoid making a metadata request to the kafka + // broker to read the current first and last offsets of the partition. + SeekDontCheck = 1 << 30 +) + +// Seek sets the offset for the next read or write operation according to whence, which +// should be one of SeekStart, SeekAbsolute, SeekEnd, or SeekCurrent. +// When seeking relative to the end, the offset is subtracted from the current offset. +// Note that for historical reasons, these do not align with the usual whence constants +// as in lseek(2) or os.Seek. +// The method returns the new absolute offset of the connection. +func (c *Conn) Seek(offset int64, whence int) (int64, error) { + seekDontCheck := (whence & SeekDontCheck) != 0 + whence &= ^SeekDontCheck + + switch whence { + case SeekStart, SeekAbsolute, SeekEnd, SeekCurrent: + default: + return 0, fmt.Errorf("whence must be one of 0, 1, 2, or 3. 
(whence = %d)", whence) + } + + if seekDontCheck { + if whence == SeekAbsolute { + c.mutex.Lock() + c.offset = offset + c.mutex.Unlock() + return offset, nil + } + + if whence == SeekCurrent { + c.mutex.Lock() + c.offset += offset + offset = c.offset + c.mutex.Unlock() + return offset, nil + } + } + + if whence == SeekAbsolute { + c.mutex.Lock() + unchanged := offset == c.offset + c.mutex.Unlock() + if unchanged { + return offset, nil + } + } + + if whence == SeekCurrent { + c.mutex.Lock() + offset = c.offset + offset + c.mutex.Unlock() + } + + first, last, err := c.ReadOffsets() + if err != nil { + return 0, err + } + + switch whence { + case SeekStart: + offset = first + offset + case SeekEnd: + offset = last - offset + } + + if offset < first || offset > last { + return 0, OffsetOutOfRange + } + + c.mutex.Lock() + c.offset = offset + c.mutex.Unlock() + return offset, nil +} + +// Read reads the message at the current offset from the connection, advancing +// the offset on success so the next call to a read method will produce the next +// message. +// The method returns the number of bytes read, or an error if something went +// wrong. +// +// While it is safe to call Read concurrently from multiple goroutines it may +// be hard for the program to predict the results as the connection offset will +// be read and written by multiple goroutines, they could read duplicates, or +// messages may be seen by only some of the goroutines. +// +// The method fails with io.ErrShortBuffer if the buffer passed as argument is +// too small to hold the message value. +// +// This method is provided to satisfy the net.Conn interface but is much less +// efficient than using the more general purpose ReadBatch method. +func (c *Conn) Read(b []byte) (int, error) { + batch := c.ReadBatch(1, len(b)) + n, err := batch.Read(b) + return n, coalesceErrors(silentEOF(err), batch.Close()) +} + +// ReadMessage reads the message at the current offset from the connection, +// advancing the offset on success so the next call to a read method will +// produce the next message. +// +// Because this method allocate memory buffers for the message key and value +// it is less memory-efficient than Read, but has the advantage of never +// failing with io.ErrShortBuffer. +// +// While it is safe to call Read concurrently from multiple goroutines it may +// be hard for the program to predict the results as the connection offset will +// be read and written by multiple goroutines, they could read duplicates, or +// messages may be seen by only some of the goroutines. +// +// This method is provided for convenience purposes but is much less efficient +// than using the more general purpose ReadBatch method. +func (c *Conn) ReadMessage(maxBytes int) (Message, error) { + batch := c.ReadBatch(1, maxBytes) + msg, err := batch.ReadMessage() + return msg, coalesceErrors(silentEOF(err), batch.Close()) +} + +// ReadBatch reads a batch of messages from the kafka server. The method always +// returns a non-nil Batch value. If an error occurred, either sending the fetch +// request or reading the response, the error will be made available by the +// returned value of the batch's Close method. +// +// While it is safe to call ReadBatch concurrently from multiple goroutines it +// may be hard for the program to predict the results as the connection offset +// will be read and written by multiple goroutines, they could read duplicates, +// or messages may be seen by only some of the goroutines. 
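+//
+// A program doesn't specify the number of messages it wants from a batch, but
+// gives the minimum and maximum number of bytes that it wants to receive from
+// the kafka server. For example (the sizes are illustrative):
+//
+//	batch := conn.ReadBatch(10e3, 1e6) // fetch 10KB min, 1MB max
+//	buf := make([]byte, 10e3)
+//	for {
+//		n, err := batch.Read(buf)
+//		if err != nil {
+//			break
+//		}
+//		fmt.Println(string(buf[:n]))
+//	}
+//	batch.Close()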
+func (c *Conn) ReadBatch(minBytes, maxBytes int) *Batch {
+	return c.ReadBatchWith(ReadBatchConfig{
+		MinBytes: minBytes,
+		MaxBytes: maxBytes,
+	})
+}
+
+// ReadBatchWith is similar to ReadBatch in every way, but the batch is
+// configured with the values in cfg, whereas ReadBatch uses the
+// ReadBatchConfig defaults for everything except MinBytes and MaxBytes.
+func (c *Conn) ReadBatchWith(cfg ReadBatchConfig) *Batch {
+	var adjustedDeadline time.Time
+	var maxFetch = int(c.fetchMaxBytes)
+
+	if cfg.MinBytes < 0 || cfg.MinBytes > maxFetch {
+		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes of %d out of [1,%d] bounds", cfg.MinBytes, maxFetch)}
+	}
+	if cfg.MaxBytes < 0 || cfg.MaxBytes > maxFetch {
+		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: maxBytes of %d out of [1,%d] bounds", cfg.MaxBytes, maxFetch)}
+	}
+	if cfg.MinBytes > cfg.MaxBytes {
+		return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes (%d) > maxBytes (%d)", cfg.MinBytes, cfg.MaxBytes)}
+	}
+
+	offset, whence := c.Offset()
+
+	offset, err := c.Seek(offset, whence|SeekDontCheck)
+	if err != nil {
+		return &Batch{err: dontExpectEOF(err)}
+	}
+
+	fetchVersion, err := c.negotiateVersion(fetch, v2, v5, v10)
+	if err != nil {
+		return &Batch{err: dontExpectEOF(err)}
+	}
+
+	id, err := c.doRequest(&c.rdeadline, func(deadline time.Time, id int32) error {
+		now := time.Now()
+		var timeout time.Duration
+		if cfg.MaxWait > 0 {
+			// explicitly-configured case: no changes are made to the deadline,
+			// and the timeout is sent exactly as specified.
+			timeout = cfg.MaxWait
+		} else {
+			// default case: use the original logic to adjust the conn's
+			// deadline.
+			deadline = adjustDeadlineForRTT(deadline, now, defaultRTT)
+			timeout = deadlineToTimeout(deadline, now)
+		}
+		// save this variable outside of the closure for later use in detecting
+		// truncated messages.
+ adjustedDeadline = deadline + switch fetchVersion { + case v10: + return c.wb.writeFetchRequestV10( + id, + c.clientID, + c.topic, + c.partition, + offset, + cfg.MinBytes, + cfg.MaxBytes+int(c.fetchMinSize), + timeout, + int8(cfg.IsolationLevel), + ) + case v5: + return c.wb.writeFetchRequestV5( + id, + c.clientID, + c.topic, + c.partition, + offset, + cfg.MinBytes, + cfg.MaxBytes+int(c.fetchMinSize), + timeout, + int8(cfg.IsolationLevel), + ) + default: + return c.wb.writeFetchRequestV2( + id, + c.clientID, + c.topic, + c.partition, + offset, + cfg.MinBytes, + cfg.MaxBytes+int(c.fetchMinSize), + timeout, + ) + } + }) + if err != nil { + return &Batch{err: dontExpectEOF(err)} + } + + _, size, lock, err := c.waitResponse(&c.rdeadline, id) + if err != nil { + return &Batch{err: dontExpectEOF(err)} + } + + var throttle int32 + var highWaterMark int64 + var remain int + + switch fetchVersion { + case v10: + throttle, highWaterMark, remain, err = readFetchResponseHeaderV10(&c.rbuf, size) + case v5: + throttle, highWaterMark, remain, err = readFetchResponseHeaderV5(&c.rbuf, size) + default: + throttle, highWaterMark, remain, err = readFetchResponseHeaderV2(&c.rbuf, size) + } + if errors.Is(err, errShortRead) { + err = checkTimeoutErr(adjustedDeadline) + } + + var msgs *messageSetReader + if err == nil { + if highWaterMark == offset { + msgs = &messageSetReader{empty: true} + } else { + msgs, err = newMessageSetReader(&c.rbuf, remain) + } + } + if errors.Is(err, errShortRead) { + err = checkTimeoutErr(adjustedDeadline) + } + + return &Batch{ + conn: c, + msgs: msgs, + deadline: adjustedDeadline, + throttle: makeDuration(throttle), + lock: lock, + topic: c.topic, // topic is copied to Batch to prevent race with Batch.close + partition: int(c.partition), // partition is copied to Batch to prevent race with Batch.close + offset: offset, + highWaterMark: highWaterMark, + // there shouldn't be a short read on initially setting up the batch. + // as such, any io.EOF is re-mapped to an io.ErrUnexpectedEOF so that we + // don't accidentally signal that we successfully reached the end of the + // batch. + err: dontExpectEOF(err), + } +} + +// ReadOffset returns the offset of the first message with a timestamp equal or +// greater to t. +func (c *Conn) ReadOffset(t time.Time) (int64, error) { + return c.readOffset(timestamp(t)) +} + +// ReadFirstOffset returns the first offset available on the connection. +func (c *Conn) ReadFirstOffset() (int64, error) { + return c.readOffset(FirstOffset) +} + +// ReadLastOffset returns the last offset available on the connection. +func (c *Conn) ReadLastOffset() (int64, error) { + return c.readOffset(LastOffset) +} + +// ReadOffsets returns the absolute first and last offsets of the topic used by +// the connection. +func (c *Conn) ReadOffsets() (first, last int64, err error) { + // We have to submit two different requests to fetch the first and last + // offsets because kafka refuses requests that ask for multiple offsets + // on the same topic and partition. 
+ if first, err = c.ReadFirstOffset(); err != nil { + return + } + if last, err = c.ReadLastOffset(); err != nil { + first = 0 // don't leak the value on error + return + } + return +} + +func (c *Conn) readOffset(t int64) (offset int64, err error) { + err = c.readOperation( + func(deadline time.Time, id int32) error { + return c.wb.writeListOffsetRequestV1(id, c.clientID, c.topic, c.partition, t) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(readArrayWith(&c.rbuf, size, func(r *bufio.Reader, size int) (int, error) { + // We skip the topic name because we've made a request for + // a single topic. + size, err := discardString(r, size) + if err != nil { + return size, err + } + + // Reading the array of partitions, there will be only one + // partition which gives the offset we're looking for. + return readArrayWith(r, size, func(r *bufio.Reader, size int) (int, error) { + var p partitionOffsetV1 + size, err := p.readFrom(r, size) + if err != nil { + return size, err + } + if p.ErrorCode != 0 { + return size, Error(p.ErrorCode) + } + offset = p.Offset + return size, nil + }) + })) + }, + ) + return +} + +// ReadPartitions returns the list of available partitions for the given list of +// topics. +// +// If the method is called with no topic, it uses the topic configured on the +// connection. If there are none, the method fetches all partitions of the kafka +// cluster. +func (c *Conn) ReadPartitions(topics ...string) (partitions []Partition, err error) { + + if len(topics) == 0 { + if len(c.topic) != 0 { + defaultTopics := [...]string{c.topic} + topics = defaultTopics[:] + } else { + // topics needs to be explicitly nil-ed out or the broker will + // interpret it as a request for 0 partitions instead of all. + topics = nil + } + } + metadataVersion, err := c.negotiateVersion(metadata, v1, v6) + if err != nil { + return nil, err + } + + err = c.readOperation( + func(deadline time.Time, id int32) error { + switch metadataVersion { + case v6: + return c.writeRequest(metadata, v6, id, topicMetadataRequestV6{Topics: topics, AllowAutoTopicCreation: true}) + default: + return c.writeRequest(metadata, v1, id, topicMetadataRequestV1(topics)) + } + }, + func(deadline time.Time, size int) error { + partitions, err = c.readPartitionsResponse(metadataVersion, size) + return err + }, + ) + return +} + +func (c *Conn) readPartitionsResponse(metadataVersion apiVersion, size int) ([]Partition, error) { + switch metadataVersion { + case v6: + var res metadataResponseV6 + if err := c.readResponse(size, &res); err != nil { + return nil, err + } + brokers := readBrokerMetadata(res.Brokers) + return c.readTopicMetadatav6(brokers, res.Topics) + default: + var res metadataResponseV1 + if err := c.readResponse(size, &res); err != nil { + return nil, err + } + brokers := readBrokerMetadata(res.Brokers) + return c.readTopicMetadatav1(brokers, res.Topics) + } +} + +func readBrokerMetadata(brokerMetadata []brokerMetadataV1) map[int32]Broker { + brokers := make(map[int32]Broker, len(brokerMetadata)) + for _, b := range brokerMetadata { + brokers[b.NodeID] = Broker{ + Host: b.Host, + Port: int(b.Port), + ID: int(b.NodeID), + Rack: b.Rack, + } + } + return brokers +} + +func (c *Conn) readTopicMetadatav1(brokers map[int32]Broker, topicMetadata []topicMetadataV1) (partitions []Partition, err error) { + for _, t := range topicMetadata { + if t.TopicErrorCode != 0 && (c.topic == "" || t.TopicName == c.topic) { + // We only report errors if they happened for the topic of + // the connection, 
otherwise the topic will simply have no
+			// partitions in the result set.
+			return nil, Error(t.TopicErrorCode)
+		}
+		for _, p := range t.Partitions {
+			partitions = append(partitions, Partition{
+				Topic:           t.TopicName,
+				Leader:          brokers[p.Leader],
+				Replicas:        makeBrokers(brokers, p.Replicas...),
+				Isr:             makeBrokers(brokers, p.Isr...),
+				ID:              int(p.PartitionID),
+				OfflineReplicas: []Broker{},
+			})
+		}
+	}
+	return
+}
+
+func (c *Conn) readTopicMetadatav6(brokers map[int32]Broker, topicMetadata []topicMetadataV6) (partitions []Partition, err error) {
+	for _, t := range topicMetadata {
+		if t.TopicErrorCode != 0 && (c.topic == "" || t.TopicName == c.topic) {
+			// We only report errors if they happened for the topic of
+			// the connection, otherwise the topic will simply have no
+			// partitions in the result set.
+			return nil, Error(t.TopicErrorCode)
+		}
+		for _, p := range t.Partitions {
+			partitions = append(partitions, Partition{
+				Topic:           t.TopicName,
+				Leader:          brokers[p.Leader],
+				Replicas:        makeBrokers(brokers, p.Replicas...),
+				Isr:             makeBrokers(brokers, p.Isr...),
+				ID:              int(p.PartitionID),
+				OfflineReplicas: makeBrokers(brokers, p.OfflineReplicas...),
+			})
+		}
+	}
+	return
+}
+
+func makeBrokers(brokers map[int32]Broker, ids ...int32) []Broker {
+	b := make([]Broker, len(ids))
+	for i, id := range ids {
+		br, ok := brokers[id]
+		if !ok {
+			// When the broker id isn't found in the current list of known
+			// brokers, use a placeholder to report that the cluster has
+			// logical knowledge of the broker but no information about the
+			// physical host where it is running.
+			br.ID = int(id)
+		}
+		b[i] = br
+	}
+	return b
+}
+
+// Write writes a message to the kafka broker that this connection was
+// established to. The method returns the number of bytes written, or an error
+// if something went wrong.
+//
+// The operation either fully succeeds or fails; it never partially writes the
+// message.
+//
+// This method is exposed to satisfy the net.Conn interface but is less efficient
+// than the more general purpose WriteMessages method.
+func (c *Conn) Write(b []byte) (int, error) {
+	return c.WriteCompressedMessages(nil, Message{Value: b})
+}
+
+// WriteMessages writes a batch of messages to the connection's topic and
+// partition, returning the number of bytes written. The write is an atomic
+// operation: it either fully succeeds or fails.
+func (c *Conn) WriteMessages(msgs ...Message) (int, error) {
+	return c.WriteCompressedMessages(nil, msgs...)
+}
+
+// WriteCompressedMessages writes a batch of messages to the connection's topic
+// and partition, returning the number of bytes written. The write is an atomic
+// operation: it either fully succeeds or fails.
+//
+// If the compression codec is not nil, the messages will be compressed.
+func (c *Conn) WriteCompressedMessages(codec CompressionCodec, msgs ...Message) (nbytes int, err error) {
+	nbytes, _, _, _, err = c.writeCompressedMessages(codec, msgs...)
+	return
+}
+
+// WriteCompressedMessagesAt writes a batch of messages to the connection's topic
+// and partition, returning the number of bytes written, partition and offset numbers
+// and timestamp assigned by the kafka broker to the message set. The write is an atomic
+// operation: it either fully succeeds or fails.
+//
+// If the compression codec is not nil, the messages will be compressed.
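+//
+// A sketch of the intended use; the codec comes from the compress
+// sub-package and the payloads are illustrative:
+//
+//	n, partition, offset, ts, err := conn.WriteCompressedMessagesAt(
+//		&compress.GzipCodec,
+//		Message{Value: []byte("hello")},
+//		Message{Value: []byte("world")},
+//	)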
+func (c *Conn) WriteCompressedMessagesAt(codec CompressionCodec, msgs ...Message) (nbytes int, partition int32, offset int64, appendTime time.Time, err error) { + return c.writeCompressedMessages(codec, msgs...) +} + +func (c *Conn) writeCompressedMessages(codec CompressionCodec, msgs ...Message) (nbytes int, partition int32, offset int64, appendTime time.Time, err error) { + if len(msgs) == 0 { + return + } + + writeTime := time.Now() + for i, msg := range msgs { + // users may believe they can set the Topic and/or Partition + // on the kafka message. + if msg.Topic != "" && msg.Topic != c.topic { + err = errInvalidWriteTopic + return + } + if msg.Partition != 0 { + err = errInvalidWritePartition + return + } + + if msg.Time.IsZero() { + msgs[i].Time = writeTime + } + + nbytes += len(msg.Key) + len(msg.Value) + } + + var produceVersion apiVersion + if produceVersion, err = c.negotiateVersion(produce, v2, v3, v7); err != nil { + return + } + + err = c.writeOperation( + func(deadline time.Time, id int32) error { + now := time.Now() + deadline = adjustDeadlineForRTT(deadline, now, defaultRTT) + switch produceVersion { + case v7: + recordBatch, err := + newRecordBatch( + codec, + msgs..., + ) + if err != nil { + return err + } + return c.wb.writeProduceRequestV7( + id, + c.clientID, + c.topic, + c.partition, + deadlineToTimeout(deadline, now), + int16(atomic.LoadInt32(&c.requiredAcks)), + c.transactionalID, + recordBatch, + ) + case v3: + recordBatch, err := + newRecordBatch( + codec, + msgs..., + ) + if err != nil { + return err + } + return c.wb.writeProduceRequestV3( + id, + c.clientID, + c.topic, + c.partition, + deadlineToTimeout(deadline, now), + int16(atomic.LoadInt32(&c.requiredAcks)), + c.transactionalID, + recordBatch, + ) + default: + return c.wb.writeProduceRequestV2( + codec, + id, + c.clientID, + c.topic, + c.partition, + deadlineToTimeout(deadline, now), + int16(atomic.LoadInt32(&c.requiredAcks)), + msgs..., + ) + } + }, + func(deadline time.Time, size int) error { + return expectZeroSize(readArrayWith(&c.rbuf, size, func(r *bufio.Reader, size int) (int, error) { + // Skip the topic, we've produced the message to only one topic, + // no need to waste resources loading it in memory. + size, err := discardString(r, size) + if err != nil { + return size, err + } + + // Read the list of partitions, there should be only one since + // we've produced a message to a single partition. + size, err = readArrayWith(r, size, func(r *bufio.Reader, size int) (int, error) { + switch produceVersion { + case v7: + var p produceResponsePartitionV7 + size, err := p.readFrom(r, size) + if err == nil && p.ErrorCode != 0 { + err = Error(p.ErrorCode) + } + if err == nil { + partition = p.Partition + offset = p.Offset + appendTime = time.Unix(0, p.Timestamp*int64(time.Millisecond)) + } + return size, err + default: + var p produceResponsePartitionV2 + size, err := p.readFrom(r, size) + if err == nil && p.ErrorCode != 0 { + err = Error(p.ErrorCode) + } + if err == nil { + partition = p.Partition + offset = p.Offset + appendTime = time.Unix(0, p.Timestamp*int64(time.Millisecond)) + } + return size, err + } + + }) + if err != nil { + return size, err + } + + // The response is trailed by the throttle time, also skipping + // since it's not interesting here. + return discardInt32(r, size) + })) + }, + ) + + if err != nil { + nbytes = 0 + } + + return +} + +// SetRequiredAcks sets the number of acknowledges from replicas that the +// connection requests when producing messages. 
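+//
+// Only -1 (wait for all in-sync replicas) and 1 (wait for the partition
+// leader only) are accepted; any other value returns InvalidRequiredAcks.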
+func (c *Conn) SetRequiredAcks(n int) error { + switch n { + case -1, 1: + atomic.StoreInt32(&c.requiredAcks, int32(n)) + return nil + default: + return InvalidRequiredAcks + } +} + +func (c *Conn) writeRequest(apiKey apiKey, apiVersion apiVersion, correlationID int32, req request) error { + hdr := c.requestHeader(apiKey, apiVersion, correlationID) + hdr.Size = (hdr.size() + req.size()) - 4 + hdr.writeTo(&c.wb) + req.writeTo(&c.wb) + return c.wbuf.Flush() +} + +func (c *Conn) readResponse(size int, res interface{}) error { + size, err := read(&c.rbuf, size, res) + if err != nil { + var kafkaError Error + if errors.As(err, &kafkaError) { + size, err = discardN(&c.rbuf, size, size) + } + } + return expectZeroSize(size, err) +} + +func (c *Conn) peekResponseSizeAndID() (int32, int32, error) { + b, err := c.rbuf.Peek(8) + if err != nil { + return 0, 0, err + } + size, id := makeInt32(b[:4]), makeInt32(b[4:]) + return size, id, nil +} + +func (c *Conn) skipResponseSizeAndID() { + c.rbuf.Discard(8) +} + +func (c *Conn) readDeadline() time.Time { + return c.rdeadline.deadline() +} + +func (c *Conn) writeDeadline() time.Time { + return c.wdeadline.deadline() +} + +func (c *Conn) readOperation(write func(time.Time, int32) error, read func(time.Time, int) error) error { + return c.do(&c.rdeadline, write, read) +} + +func (c *Conn) writeOperation(write func(time.Time, int32) error, read func(time.Time, int) error) error { + return c.do(&c.wdeadline, write, read) +} + +func (c *Conn) enter() { + atomic.AddInt32(&c.inflight, +1) +} + +func (c *Conn) leave() { + atomic.AddInt32(&c.inflight, -1) +} + +func (c *Conn) concurrency() int { + return int(atomic.LoadInt32(&c.inflight)) +} + +func (c *Conn) do(d *connDeadline, write func(time.Time, int32) error, read func(time.Time, int) error) error { + id, err := c.doRequest(d, write) + if err != nil { + return err + } + + deadline, size, lock, err := c.waitResponse(d, id) + if err != nil { + return err + } + + if err = read(deadline, size); err != nil { + var kafkaError Error + if !errors.As(err, &kafkaError) { + c.conn.Close() + } + } + + d.unsetConnReadDeadline() + lock.Unlock() + return err +} + +func (c *Conn) doRequest(d *connDeadline, write func(time.Time, int32) error) (id int32, err error) { + c.enter() + c.wlock.Lock() + c.correlationID++ + id = c.correlationID + err = write(d.setConnWriteDeadline(c.conn), id) + d.unsetConnWriteDeadline() + + if err != nil { + // When an error occurs there's no way to know if the connection is in a + // recoverable state so we're better off just giving up at this point to + // avoid any risk of corrupting the following operations. + c.conn.Close() + c.leave() + } + + c.wlock.Unlock() + return +} + +func (c *Conn) waitResponse(d *connDeadline, id int32) (deadline time.Time, size int, lock *sync.Mutex, err error) { + for { + var rsz int32 + var rid int32 + + c.rlock.Lock() + deadline = d.setConnReadDeadline(c.conn) + rsz, rid, err = c.peekResponseSizeAndID() + + if err != nil { + d.unsetConnReadDeadline() + c.conn.Close() + c.rlock.Unlock() + break + } + + if id == rid { + c.skipResponseSizeAndID() + size, lock = int(rsz-4), &c.rlock + // Don't unlock the read mutex to yield ownership to the caller. + break + } + + if c.concurrency() == 1 { + // If the goroutine is the only one waiting on this connection it + // should be impossible to read a correlation id different from the + // one it expects. This is a sign that the data we are reading on + // the wire is corrupted and the connection needs to be closed. 
+ err = io.ErrNoProgress + c.rlock.Unlock() + break + } + + // Optimistically release the read lock if a response has already + // been received but the current operation is not the target for it. + c.rlock.Unlock() + } + + c.leave() + return +} + +func (c *Conn) requestHeader(apiKey apiKey, apiVersion apiVersion, correlationID int32) requestHeader { + return requestHeader{ + ApiKey: int16(apiKey), + ApiVersion: int16(apiVersion), + CorrelationID: correlationID, + ClientID: c.clientID, + } +} + +func (c *Conn) ApiVersions() ([]ApiVersion, error) { + deadline := &c.rdeadline + + if deadline.deadline().IsZero() { + // ApiVersions is called automatically when API version negotiation + // needs to happen, so we are not guaranteed that a read deadline has + // been set yet. Fallback to use the write deadline in case it was + // set, for example when version negotiation is initiated during a + // produce request. + deadline = &c.wdeadline + } + + id, err := c.doRequest(deadline, func(_ time.Time, id int32) error { + h := requestHeader{ + ApiKey: int16(apiVersions), + ApiVersion: int16(v0), + CorrelationID: id, + ClientID: c.clientID, + } + h.Size = (h.size() - 4) + h.writeTo(&c.wb) + return c.wbuf.Flush() + }) + if err != nil { + return nil, err + } + + _, size, lock, err := c.waitResponse(deadline, id) + if err != nil { + return nil, err + } + defer lock.Unlock() + + var errorCode int16 + if size, err = readInt16(&c.rbuf, size, &errorCode); err != nil { + return nil, err + } + var arrSize int32 + if size, err = readInt32(&c.rbuf, size, &arrSize); err != nil { + return nil, err + } + r := make([]ApiVersion, arrSize) + for i := 0; i < int(arrSize); i++ { + if size, err = readInt16(&c.rbuf, size, &r[i].ApiKey); err != nil { + return nil, err + } + if size, err = readInt16(&c.rbuf, size, &r[i].MinVersion); err != nil { + return nil, err + } + if size, err = readInt16(&c.rbuf, size, &r[i].MaxVersion); err != nil { + return nil, err + } + } + + if errorCode != 0 { + return r, Error(errorCode) + } + + return r, nil +} + +// connDeadline is a helper type to implement read/write deadline management on +// the kafka connection. +type connDeadline struct { + mutex sync.Mutex + value time.Time + rconn net.Conn + wconn net.Conn +} + +func (d *connDeadline) deadline() time.Time { + d.mutex.Lock() + t := d.value + d.mutex.Unlock() + return t +} + +func (d *connDeadline) setDeadline(t time.Time) { + d.mutex.Lock() + d.value = t + + if d.rconn != nil { + d.rconn.SetReadDeadline(t) + } + + if d.wconn != nil { + d.wconn.SetWriteDeadline(t) + } + + d.mutex.Unlock() +} + +func (d *connDeadline) setConnReadDeadline(conn net.Conn) time.Time { + d.mutex.Lock() + deadline := d.value + d.rconn = conn + d.rconn.SetReadDeadline(deadline) + d.mutex.Unlock() + return deadline +} + +func (d *connDeadline) setConnWriteDeadline(conn net.Conn) time.Time { + d.mutex.Lock() + deadline := d.value + d.wconn = conn + d.wconn.SetWriteDeadline(deadline) + d.mutex.Unlock() + return deadline +} + +func (d *connDeadline) unsetConnReadDeadline() { + d.mutex.Lock() + d.rconn = nil + d.mutex.Unlock() +} + +func (d *connDeadline) unsetConnWriteDeadline() { + d.mutex.Lock() + d.wconn = nil + d.mutex.Unlock() +} + +// saslHandshake sends the SASL handshake message. This will determine whether +// the Mechanism is supported by the cluster. If it's not, this function will +// error out with UnsupportedSASLMechanism. 
+// +// If the mechanism is unsupported, the handshake request will reply with the +// list of the cluster's configured mechanisms, which could potentially be used +// to facilitate negotiation. At the moment, we are not negotiating the +// mechanism as we believe that brokers are usually known to the client, and +// therefore the client should already know which mechanisms are supported. +// +// See http://kafka.apache.org/protocol.html#The_Messages_SaslHandshake +func (c *Conn) saslHandshake(mechanism string) error { + // The wire format for V0 and V1 is identical, but the version + // number will affect how the SASL authentication + // challenge/responses are sent + var resp saslHandshakeResponseV0 + + version, err := c.negotiateVersion(saslHandshake, v0, v1) + if err != nil { + return err + } + + err = c.writeOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(saslHandshake, version, id, &saslHandshakeRequestV0{Mechanism: mechanism}) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (int, error) { + return (&resp).readFrom(&c.rbuf, size) + }()) + }, + ) + if err == nil && resp.ErrorCode != 0 { + err = Error(resp.ErrorCode) + } + return err +} + +// saslAuthenticate sends the SASL authenticate message. This function must +// be immediately preceded by a successful saslHandshake. +// +// See http://kafka.apache.org/protocol.html#The_Messages_SaslAuthenticate +func (c *Conn) saslAuthenticate(data []byte) ([]byte, error) { + // if we sent a v1 handshake, then we must encapsulate the authentication + // request in a saslAuthenticateRequest. otherwise, we read and write raw + // bytes. + version, err := c.negotiateVersion(saslHandshake, v0, v1) + if err != nil { + return nil, err + } + if version == v1 { + var request = saslAuthenticateRequestV0{Data: data} + var response saslAuthenticateResponseV0 + + err := c.writeOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(saslAuthenticate, v0, id, request) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err == nil && response.ErrorCode != 0 { + err = Error(response.ErrorCode) + } + return response.Data, err + } + + // fall back to opaque bytes on the wire. the broker is expecting these if + // it just processed a v0 sasl handshake. + c.wb.writeInt32(int32(len(data))) + if _, err := c.wb.Write(data); err != nil { + return nil, err + } + if err := c.wb.Flush(); err != nil { + return nil, err + } + + var respLen int32 + if _, err := readInt32(&c.rbuf, 4, &respLen); err != nil { + return nil, err + } + + resp, _, err := readNewBytes(&c.rbuf, int(respLen), int(respLen)) + return resp, err +} diff --git a/vendor/github.com/segmentio/kafka-go/consumergroup.go b/vendor/github.com/segmentio/kafka-go/consumergroup.go new file mode 100644 index 00000000000..b9d0a7e2e24 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/consumergroup.go @@ -0,0 +1,1252 @@ +package kafka + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "net" + "strconv" + "strings" + "sync" + "time" +) + +// ErrGroupClosed is returned by ConsumerGroup.Next when the group has already +// been closed. +var ErrGroupClosed = errors.New("consumer group is closed") + +// ErrGenerationEnded is returned by the context.Context issued by the +// Generation's Start function when the context has been closed. 
+var ErrGenerationEnded = errors.New("consumer group generation has ended")
+
+const (
+ // defaultProtocolType holds the default protocol type documented in the
+ // kafka protocol
+ //
+ // See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-GroupMembershipAPI
+ defaultProtocolType = "consumer"
+
+ // defaultHeartbeatInterval contains the default time between heartbeats. If
+ // the coordinator does not receive a heartbeat within the session timeout interval,
+ // the consumer will be considered dead and the coordinator will rebalance the
+ // group.
+ //
+ // As a rule, the heartbeat interval should be no greater than 1/3 the session timeout.
+ defaultHeartbeatInterval = 3 * time.Second
+
+ // defaultSessionTimeout contains the default interval the coordinator will wait
+ // for a heartbeat before marking a consumer as dead.
+ defaultSessionTimeout = 30 * time.Second
+
+ // defaultRebalanceTimeout contains the amount of time the coordinator will wait
+ // for consumers to issue a join group once a rebalance has been requested.
+ defaultRebalanceTimeout = 30 * time.Second
+
+ // defaultJoinGroupBackoff is the amount of time to wait after a failed
+ // consumer group generation before attempting to re-join.
+ defaultJoinGroupBackoff = 5 * time.Second
+
+ // defaultRetentionTime holds the length of time the consumer group will be
+ // saved by kafka. This value tells the broker to use its configured value.
+ defaultRetentionTime = -1 * time.Millisecond
+
+ // defaultPartitionWatchTime contains the amount of time kafka-go will wait to
+ // query the brokers looking for partition changes.
+ defaultPartitionWatchTime = 5 * time.Second
+
+ // defaultTimeout is the deadline to set when interacting with the
+ // consumer group coordinator.
+ defaultTimeout = 5 * time.Second
+)
+
+// ConsumerGroupConfig is a configuration object used to create new instances of
+// ConsumerGroup.
+type ConsumerGroupConfig struct {
+ // ID is the consumer group ID. It must not be empty.
+ ID string
+
+ // The list of broker addresses used to connect to the kafka cluster. It
+ // must not be empty.
+ Brokers []string
+
+ // A dialer used to open connections to the kafka server. This field is
+ // optional; if nil, the default dialer is used instead.
+ Dialer *Dialer
+
+ // Topics is the list of topics that will be consumed by this group. It
+ // will usually have a single value, but it is permitted to have multiple
+ // for more complex use cases.
+ Topics []string
+
+ // GroupBalancers is the priority-ordered list of client-side consumer group
+ // balancing strategies that will be offered to the coordinator. The first
+ // strategy that all group members support will be chosen by the leader.
+ //
+ // Default: [Range, RoundRobin]
+ GroupBalancers []GroupBalancer
+
+ // HeartbeatInterval sets the optional frequency at which the reader sends the consumer
+ // group heartbeat update.
+ //
+ // Default: 3s
+ HeartbeatInterval time.Duration
+
+ // PartitionWatchInterval indicates how often a reader checks for partition changes.
+ // If a reader sees a partition change (such as a partition add) it will rebalance the group
+ // picking up new partitions.
+ //
+ // Default: 5s
+ PartitionWatchInterval time.Duration
+
+ // WatchPartitionChanges is used to inform kafka-go that a consumer group should be
+ // polling the brokers and rebalancing if any partition changes happen to the topic.
+ WatchPartitionChanges bool
+
+ // SessionTimeout optionally sets the length of time that may pass without a heartbeat
+ // before the coordinator considers the consumer dead and initiates a rebalance.
+ //
+ // Default: 30s
+ SessionTimeout time.Duration
+
+ // RebalanceTimeout optionally sets the length of time the coordinator will wait
+ // for members to join as part of a rebalance. For kafka servers under higher
+ // load, it may be useful to set this value higher.
+ //
+ // Default: 30s
+ RebalanceTimeout time.Duration
+
+ // JoinGroupBackoff optionally sets the length of time to wait before re-joining
+ // the consumer group after an error.
+ //
+ // Default: 5s
+ JoinGroupBackoff time.Duration
+
+ // RetentionTime optionally sets the length of time the consumer group will
+ // be saved by the broker. -1 will disable the setting and leave the
+ // retention up to the broker's offsets.retention.minutes property. By
+ // default, that setting is 1 day for kafka < 2.0 and 7 days for kafka >=
+ // 2.0.
+ //
+ // Default: -1
+ RetentionTime time.Duration
+
+ // StartOffset determines from whence the consumer group should begin
+ // consuming when it finds a partition without a committed offset. If
+ // non-zero, it must be set to one of FirstOffset or LastOffset.
+ //
+ // Default: FirstOffset
+ StartOffset int64
+
+ // If not nil, specifies a logger used to report internal changes within the
+ // reader.
+ Logger Logger
+
+ // ErrorLogger is the logger used to report errors. If nil, the reader falls
+ // back to using Logger instead.
+ ErrorLogger Logger
+
+ // Timeout is the network timeout used when communicating with the consumer
+ // group coordinator. This value should not be too small since errors
+ // communicating with the broker will generally cause a consumer group
+ // rebalance, and it's undesirable that a transient network error introduce
+ // that overhead. Similarly, it should not be too large or the consumer
+ // group may be slow to respond to the coordinator failing over to another
+ // broker.
+ //
+ // Default: 5s
+ Timeout time.Duration
+
+ // connect is a function for dialing the coordinator. This is provided for
+ // unit testing to mock broker connections.
+ connect func(dialer *Dialer, brokers ...string) (coordinator, error)
+}
+
+// Validate validates ConsumerGroupConfig properties and sets relevant
+// defaults.
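+//
+// As a hedged sketch, a minimal configuration that leans on these defaults
+// could look as follows (the broker address, group ID, and topic are
+// illustrative values, not taken from this codebase):
+//
+// cfg := ConsumerGroupConfig{
+// ID: "example-group",
+// Brokers: []string{"localhost:9092"},
+// Topics: []string{"example-topic"},
+// }
+// if err := cfg.Validate(); err != nil { // fills in the defaults above
+// // handle the configuration error
+// }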
+func (config *ConsumerGroupConfig) Validate() error {
+
+ if len(config.Brokers) == 0 {
+ return errors.New("cannot create a consumer group with an empty list of broker addresses")
+ }
+
+ if len(config.Topics) == 0 {
+ return errors.New("cannot create a consumer group without a topic")
+ }
+
+ if config.ID == "" {
+ return errors.New("cannot create a consumer group without an ID")
+ }
+
+ if config.Dialer == nil {
+ config.Dialer = DefaultDialer
+ }
+
+ if len(config.GroupBalancers) == 0 {
+ config.GroupBalancers = []GroupBalancer{
+ RangeGroupBalancer{},
+ RoundRobinGroupBalancer{},
+ }
+ }
+
+ if config.HeartbeatInterval == 0 {
+ config.HeartbeatInterval = defaultHeartbeatInterval
+ }
+
+ if config.SessionTimeout == 0 {
+ config.SessionTimeout = defaultSessionTimeout
+ }
+
+ if config.PartitionWatchInterval == 0 {
+ config.PartitionWatchInterval = defaultPartitionWatchTime
+ }
+
+ if config.RebalanceTimeout == 0 {
+ config.RebalanceTimeout = defaultRebalanceTimeout
+ }
+
+ if config.JoinGroupBackoff == 0 {
+ config.JoinGroupBackoff = defaultJoinGroupBackoff
+ }
+
+ if config.RetentionTime == 0 {
+ config.RetentionTime = defaultRetentionTime
+ }
+
+ if config.HeartbeatInterval < 0 || (config.HeartbeatInterval/time.Millisecond) >= math.MaxInt32 {
+ return fmt.Errorf("HeartbeatInterval out of bounds: %d", config.HeartbeatInterval)
+ }
+
+ if config.SessionTimeout < 0 || (config.SessionTimeout/time.Millisecond) >= math.MaxInt32 {
+ return fmt.Errorf("SessionTimeout out of bounds: %d", config.SessionTimeout)
+ }
+
+ if config.RebalanceTimeout < 0 || (config.RebalanceTimeout/time.Millisecond) >= math.MaxInt32 {
+ return fmt.Errorf("RebalanceTimeout out of bounds: %d", config.RebalanceTimeout)
+ }
+
+ if config.JoinGroupBackoff < 0 || (config.JoinGroupBackoff/time.Millisecond) >= math.MaxInt32 {
+ return fmt.Errorf("JoinGroupBackoff out of bounds: %d", config.JoinGroupBackoff)
+ }
+
+ if config.RetentionTime < 0 && config.RetentionTime != defaultRetentionTime {
+ return fmt.Errorf("RetentionTime out of bounds: %d", config.RetentionTime)
+ }
+
+ if config.PartitionWatchInterval < 0 || (config.PartitionWatchInterval/time.Millisecond) >= math.MaxInt32 {
+ return fmt.Errorf("PartitionWatchInterval out of bounds: %d", config.PartitionWatchInterval)
+ }
+
+ if config.StartOffset == 0 {
+ config.StartOffset = FirstOffset
+ }
+
+ if config.StartOffset != FirstOffset && config.StartOffset != LastOffset {
+ return fmt.Errorf("StartOffset is not valid %d", config.StartOffset)
+ }
+
+ if config.Timeout == 0 {
+ config.Timeout = defaultTimeout
+ }
+
+ if config.connect == nil {
+ config.connect = makeConnect(*config)
+ }
+
+ return nil
+}
+
+// PartitionAssignment represents the starting state of a partition that has
+// been assigned to a consumer.
+type PartitionAssignment struct {
+ // ID is the partition ID.
+ ID int
+
+ // Offset is the initial offset at which this assignment begins. It will
+ // either be an absolute offset if one has previously been committed for
+ // the consumer group or a relative offset such as FirstOffset when this
+ // is the first time the partition has been assigned to a member of the
+ // group.
+ Offset int64
+}
+
+// genCtx adapts the done channel of the generation to a context.Context. This
+// is used by Generation.Start so that we can pass a context to go routines
+// instead of passing around channels.
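+//
+// Only Done and Err carry real signal here; Deadline and Value are stubs that
+// exist to satisfy the context.Context interface.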
+type genCtx struct {
+ gen *Generation
+}
+
+func (c genCtx) Done() <-chan struct{} {
+ return c.gen.done
+}
+
+func (c genCtx) Err() error {
+ select {
+ case <-c.gen.done:
+ return ErrGenerationEnded
+ default:
+ return nil
+ }
+}
+
+func (c genCtx) Deadline() (time.Time, bool) {
+ return time.Time{}, false
+}
+
+func (c genCtx) Value(interface{}) interface{} {
+ return nil
+}
+
+// Generation represents a single consumer group generation. The generation
+// carries the topic+partition assignments for the given member. It also provides
+// facilities for committing offsets and for running functions whose lifecycles
+// are bound to the generation.
+type Generation struct {
+ // ID is the generation ID as assigned by the consumer group coordinator.
+ ID int32
+
+ // GroupID is the name of the consumer group.
+ GroupID string
+
+ // MemberID is the ID assigned to this consumer by the consumer group
+ // coordinator.
+ MemberID string
+
+ // Assignments is the initial state of this Generation. The partition
+ // assignments are grouped by topic.
+ Assignments map[string][]PartitionAssignment
+
+ conn coordinator
+
+ // the following fields are used for process accounting to synchronize
+ // between Start and close. lock protects all of them. done is closed
+ // when the generation is ending in order to signal that the generation
+ // should start self-destructing. closed protects against double-closing
+ // the done chan. routines is a count of running go routines that have been
+ // launched by Start. joined will be closed by the last go routine to exit.
+ lock sync.Mutex
+ done chan struct{}
+ closed bool
+ routines int
+ joined chan struct{}
+
+ retentionMillis int64
+ log func(func(Logger))
+ logError func(func(Logger))
+}
+
+// close stops the generation and waits for all functions launched via Start to
+// terminate.
+func (g *Generation) close() {
+ g.lock.Lock()
+ if !g.closed {
+ close(g.done)
+ g.closed = true
+ }
+ // determine whether any go routines are running that we need to wait for.
+ // waiting needs to happen outside of the critical section.
+ r := g.routines
+ g.lock.Unlock()
+
+ // NOTE: r will be zero if no go routines were ever launched. no need to
+ // wait in that case.
+ if r > 0 {
+ <-g.joined
+ }
+}
+
+// Start launches the provided function in a go routine and adds accounting such
+// that when the function exits, it stops the current generation (if not
+// already in the process of doing so).
+//
+// The provided function MUST support cancellation via the ctx argument and exit
+// in a timely manner once the ctx is complete. When the context is closed, the
+// context's Err() function will return ErrGenerationEnded.
+//
+// When closing out a generation, the consumer group will wait for all functions
+// launched by Start to exit before the group can move on and join the next
+// generation. If the function does not exit promptly, it will stop forward
+// progress for this consumer and potentially cause consumer group membership
+// churn.
+func (g *Generation) Start(fn func(ctx context.Context)) {
+ g.lock.Lock()
+ defer g.lock.Unlock()
+
+ // this is an edge case: if the generation has already closed, then it's
+ // possible that the close func has already waited on outstanding go
+ // routines and exited.
+ //
+ // nonetheless, it's important to honor that the fn is invoked in case the
+ // calling function is waiting e.g. on a channel send or a WaitGroup. in
+ // such a case, fn should immediately exit because ctx.Err() will return
+ // ErrGenerationEnded.
+ if g.closed {
+ go fn(genCtx{g})
+ return
+ }
+
+ // register that there is one more go routine that's part of this gen.
+ g.routines++
+
+ go func() {
+ fn(genCtx{g})
+ g.lock.Lock()
+ // shut down the generation as soon as one function exits. this is
+ // different from close() in that it doesn't wait for all go routines in
+ // the generation to exit.
+ if !g.closed {
+ close(g.done)
+ g.closed = true
+ }
+ g.routines--
+ // if this was the last go routine in the generation, close the joined
+ // chan so that close() can exit if it's waiting.
+ if g.routines == 0 {
+ close(g.joined)
+ }
+ g.lock.Unlock()
+ }()
+}
+
+// CommitOffsets commits the provided topic+partition+offset combos to the
+// consumer group coordinator. This can be used to reset the consumer to
+// explicit offsets.
+func (g *Generation) CommitOffsets(offsets map[string]map[int]int64) error {
+ if len(offsets) == 0 {
+ return nil
+ }
+
+ topics := make([]offsetCommitRequestV2Topic, 0, len(offsets))
+ for topic, partitions := range offsets {
+ t := offsetCommitRequestV2Topic{Topic: topic}
+ for partition, offset := range partitions {
+ t.Partitions = append(t.Partitions, offsetCommitRequestV2Partition{
+ Partition: int32(partition),
+ Offset: offset,
+ })
+ }
+ topics = append(topics, t)
+ }
+
+ request := offsetCommitRequestV2{
+ GroupID: g.GroupID,
+ GenerationID: g.ID,
+ MemberID: g.MemberID,
+ RetentionTime: g.retentionMillis,
+ Topics: topics,
+ }
+
+ _, err := g.conn.offsetCommit(request)
+ if err == nil {
+ // if logging is enabled, print out the partitions that were committed.
+ g.log(func(l Logger) {
+ var report []string
+ for _, t := range request.Topics {
+ report = append(report, fmt.Sprintf("\ttopic: %s", t.Topic))
+ for _, p := range t.Partitions {
+ report = append(report, fmt.Sprintf("\t\tpartition %d: %d", p.Partition, p.Offset))
+ }
+ }
+ l.Printf("committed offsets for group %s: \n%s", g.GroupID, strings.Join(report, "\n"))
+ })
+ }
+
+ return err
+}
+
+// heartbeatLoop checks in with the consumer group coordinator at the provided
+// interval. It exits if it ever encounters an error, which would signal the
+// end of the generation.
+func (g *Generation) heartbeatLoop(interval time.Duration) {
+ g.Start(func(ctx context.Context) {
+ g.log(func(l Logger) {
+ l.Printf("started heartbeat for group, %v [%v]", g.GroupID, interval)
+ })
+ defer g.log(func(l Logger) {
+ l.Printf("stopped heartbeat for group %s\n", g.GroupID)
+ })
+
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ _, err := g.conn.heartbeat(heartbeatRequestV0{
+ GroupID: g.GroupID,
+ GenerationID: g.ID,
+ MemberID: g.MemberID,
+ })
+ if err != nil {
+ return
+ }
+ }
+ }
+ })
+}
+
+// partitionWatcher queries kafka and watches for partition changes, triggering
+// a rebalance if changes are found. Similar to heartbeat, it's okay to return on
+// error here, since if you are unable to ask a broker for basic metadata you're in
+// a bad spot and should rebalance. Commonly you will see an error here if there
+// is a problem with the connection to the coordinator and a rebalance will
+// establish a new connection to the coordinator.
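+//
+// Note that the watcher below only compares the partition count; a change in
+// leadership or replica placement alone will not trigger a rebalance.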
+func (g *Generation) partitionWatcher(interval time.Duration, topic string) {
+ g.Start(func(ctx context.Context) {
+ g.log(func(l Logger) {
+ l.Printf("started partition watcher for group, %v, topic %v [%v]", g.GroupID, topic, interval)
+ })
+ defer g.log(func(l Logger) {
+ l.Printf("stopped partition watcher for group, %v, topic %v", g.GroupID, topic)
+ })
+
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+
+ ops, err := g.conn.readPartitions(topic)
+ if err != nil {
+ g.logError(func(l Logger) {
+ l.Printf("Problem getting partitions during startup, %v\n, Returning and setting up nextGeneration", err)
+ })
+ return
+ }
+ oParts := len(ops)
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ ops, err := g.conn.readPartitions(topic)
+ switch {
+ case err == nil, errors.Is(err, UnknownTopicOrPartition):
+ if len(ops) != oParts {
+ g.log(func(l Logger) {
+ l.Printf("Partition changes found, rebalancing group: %v.", g.GroupID)
+ })
+ return
+ }
+
+ default:
+ g.logError(func(l Logger) {
+ l.Printf("Problem getting partitions while checking for changes, %v", err)
+ })
+ var kafkaError Error
+ if errors.As(err, &kafkaError) {
+ continue
+ }
+ // other errors imply that we lost the connection to the coordinator, so we
+ // should abort and reconnect.
+ return
+ }
+ }
+ }
+ })
+}
+
+// coordinator is a subset of the functionality in Conn in order to facilitate
+// testing the consumer group...especially for error conditions that are
+// difficult to instigate with a live broker running in docker.
+type coordinator interface {
+ io.Closer
+ findCoordinator(findCoordinatorRequestV0) (findCoordinatorResponseV0, error)
+ joinGroup(joinGroupRequestV1) (joinGroupResponseV1, error)
+ syncGroup(syncGroupRequestV0) (syncGroupResponseV0, error)
+ leaveGroup(leaveGroupRequestV0) (leaveGroupResponseV0, error)
+ heartbeat(heartbeatRequestV0) (heartbeatResponseV0, error)
+ offsetFetch(offsetFetchRequestV1) (offsetFetchResponseV1, error)
+ offsetCommit(offsetCommitRequestV2) (offsetCommitResponseV2, error)
+ readPartitions(...string) ([]Partition, error)
+}
+
+// timeoutCoordinator wraps the Conn to ensure that every operation has a
+// deadline. Otherwise, it would be possible for requests to block indefinitely
+// if the remote server never responds. There are many spots where the consumer
+// group needs to interact with the broker, so it feels less error prone to
+// factor all of the deadline management into this shared location as opposed to
+// peppering it all through where the code actually interacts with the broker.
+type timeoutCoordinator struct {
+ timeout time.Duration
+ sessionTimeout time.Duration
+ rebalanceTimeout time.Duration
+ conn *Conn
+}
+
+func (t *timeoutCoordinator) Close() error {
+ return t.conn.Close()
+}
+
+func (t *timeoutCoordinator) findCoordinator(req findCoordinatorRequestV0) (findCoordinatorResponseV0, error) {
+ if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil {
+ return findCoordinatorResponseV0{}, err
+ }
+ return t.conn.findCoordinator(req)
+}
+
+func (t *timeoutCoordinator) joinGroup(req joinGroupRequestV1) (joinGroupResponseV1, error) {
+ // in the case of join group, the consumer group coordinator may wait up
+ // to rebalance timeout in order to wait for all members to join.
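+ // hence the deadline below is the base network timeout plus the
+ // configured rebalance timeout.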
+ if err := t.conn.SetDeadline(time.Now().Add(t.timeout + t.rebalanceTimeout)); err != nil { + return joinGroupResponseV1{}, err + } + return t.conn.joinGroup(req) +} + +func (t *timeoutCoordinator) syncGroup(req syncGroupRequestV0) (syncGroupResponseV0, error) { + // in the case of sync group, the consumer group leader is given up to + // the session timeout to respond before the coordinator will give up. + if err := t.conn.SetDeadline(time.Now().Add(t.timeout + t.sessionTimeout)); err != nil { + return syncGroupResponseV0{}, err + } + return t.conn.syncGroup(req) +} + +func (t *timeoutCoordinator) leaveGroup(req leaveGroupRequestV0) (leaveGroupResponseV0, error) { + if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil { + return leaveGroupResponseV0{}, err + } + return t.conn.leaveGroup(req) +} + +func (t *timeoutCoordinator) heartbeat(req heartbeatRequestV0) (heartbeatResponseV0, error) { + if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil { + return heartbeatResponseV0{}, err + } + return t.conn.heartbeat(req) +} + +func (t *timeoutCoordinator) offsetFetch(req offsetFetchRequestV1) (offsetFetchResponseV1, error) { + if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil { + return offsetFetchResponseV1{}, err + } + return t.conn.offsetFetch(req) +} + +func (t *timeoutCoordinator) offsetCommit(req offsetCommitRequestV2) (offsetCommitResponseV2, error) { + if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil { + return offsetCommitResponseV2{}, err + } + return t.conn.offsetCommit(req) +} + +func (t *timeoutCoordinator) readPartitions(topics ...string) ([]Partition, error) { + if err := t.conn.SetDeadline(time.Now().Add(t.timeout)); err != nil { + return nil, err + } + return t.conn.ReadPartitions(topics...) +} + +// NewConsumerGroup creates a new ConsumerGroup. It returns an error if the +// provided configuration is invalid. It does not attempt to connect to the +// Kafka cluster. That happens asynchronously, and any errors will be reported +// by Next. +func NewConsumerGroup(config ConsumerGroupConfig) (*ConsumerGroup, error) { + if err := config.Validate(); err != nil { + return nil, err + } + + cg := &ConsumerGroup{ + config: config, + next: make(chan *Generation), + errs: make(chan error), + done: make(chan struct{}), + } + cg.wg.Add(1) + go func() { + cg.run() + cg.wg.Done() + }() + return cg, nil +} + +// ConsumerGroup models a Kafka consumer group. A caller doesn't interact with +// the group directly. Rather, they interact with a Generation. Every time a +// member enters or exits the group, it results in a new Generation. The +// Generation is where partition assignments and offset management occur. +// Callers will use Next to get a handle to the Generation. +type ConsumerGroup struct { + config ConsumerGroupConfig + next chan *Generation + errs chan error + + closeOnce sync.Once + wg sync.WaitGroup + done chan struct{} +} + +// Close terminates the current generation by causing this member to leave and +// releases all local resources used to participate in the consumer group. +// Close will also end the current generation if it is still active. +func (cg *ConsumerGroup) Close() error { + cg.closeOnce.Do(func() { + close(cg.done) + }) + cg.wg.Wait() + return nil +} + +// Next waits for the next consumer group generation. There will never be two +// active generations. Next will never return a new generation until the +// previous one has completed. 
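+//
+// A hedged sketch of the intended call pattern (construction details and
+// error handling are illustrative only):
+//
+// group, _ := NewConsumerGroup(ConsumerGroupConfig{ /* ... */ })
+// defer group.Close()
+// for {
+// gen, err := group.Next(context.TODO())
+// if err != nil {
+// break // e.g. ErrGroupClosed
+// }
+// gen.Start(func(ctx context.Context) {
+// // consume gen.Assignments until ctx is done
+// <-ctx.Done()
+// })
+// }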
+// +// If there are errors setting up the next generation, they will be surfaced +// here. +// +// If the ConsumerGroup has been closed, then Next will return ErrGroupClosed. +func (cg *ConsumerGroup) Next(ctx context.Context) (*Generation, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-cg.done: + return nil, ErrGroupClosed + case err := <-cg.errs: + return nil, err + case next := <-cg.next: + return next, nil + } +} + +func (cg *ConsumerGroup) run() { + // the memberID is the only piece of information that is maintained across + // generations. it starts empty and will be assigned on the first nextGeneration + // when the joinGroup request is processed. it may change again later if + // the CG coordinator fails over or if the member is evicted. otherwise, it + // will be constant for the lifetime of this group. + var memberID string + var err error + for { + memberID, err = cg.nextGeneration(memberID) + + // backoff will be set if this go routine should sleep before continuing + // to the next generation. it will be non-nil in the case of an error + // joining or syncing the group. + var backoff <-chan time.Time + + switch { + case err == nil: + // no error...the previous generation finished normally. + continue + + case errors.Is(err, ErrGroupClosed): + // the CG has been closed...leave the group and exit loop. + _ = cg.leaveGroup(memberID) + return + + case errors.Is(err, RebalanceInProgress): + // in case of a RebalanceInProgress, don't leave the group or + // change the member ID, but report the error. the next attempt + // to join the group will then be subject to the rebalance + // timeout, so the broker will be responsible for throttling + // this loop. + + default: + // leave the group and report the error if we had gotten far + // enough so as to have a member ID. also clear the member id + // so we don't attempt to use it again. in order to avoid + // a tight error loop, backoff before the next attempt to join + // the group. + _ = cg.leaveGroup(memberID) + memberID = "" + backoff = time.After(cg.config.JoinGroupBackoff) + } + // ensure that we exit cleanly in case the CG is done and no one is + // waiting to receive on the unbuffered error channel. + select { + case <-cg.done: + return + case cg.errs <- err: + } + // backoff if needed, being sure to exit cleanly if the CG is done. + if backoff != nil { + select { + case <-cg.done: + // exit cleanly if the group is closed. + return + case <-backoff: + } + } + } +} + +func (cg *ConsumerGroup) nextGeneration(memberID string) (string, error) { + // get a new connection to the coordinator on each loop. the previous + // generation could have exited due to losing the connection, so this + // ensures that we always have a clean starting point. it means we will + // re-connect in certain cases, but that shouldn't be an issue given that + // rebalances are relatively infrequent under normal operating + // conditions. + conn, err := cg.coordinator() + if err != nil { + cg.withErrorLogger(func(log Logger) { + log.Printf("Unable to establish connection to consumer group coordinator for group %s: %v", cg.config.ID, err) + }) + return memberID, err // a prior memberID may still be valid, so don't return "" + } + defer conn.Close() + + var generationID int32 + var groupAssignments GroupMemberAssignments + var assignments map[string][]int32 + + // join group. this will join the group and prepare assignments if our + // consumer is elected leader. it may also change or assign the member ID. 
+ memberID, generationID, groupAssignments, err = cg.joinGroup(conn, memberID)
+ if err != nil {
+ cg.withErrorLogger(func(log Logger) {
+ log.Printf("Failed to join group %s: %v", cg.config.ID, err)
+ })
+ return memberID, err
+ }
+ cg.withLogger(func(log Logger) {
+ log.Printf("Joined group %s as member %s in generation %d", cg.config.ID, memberID, generationID)
+ })
+
+ // sync group
+ assignments, err = cg.syncGroup(conn, memberID, generationID, groupAssignments)
+ if err != nil {
+ cg.withErrorLogger(func(log Logger) {
+ log.Printf("Failed to sync group %s: %v", cg.config.ID, err)
+ })
+ return memberID, err
+ }
+
+ // fetch initial offsets.
+ var offsets map[string]map[int]int64
+ offsets, err = cg.fetchOffsets(conn, assignments)
+ if err != nil {
+ cg.withErrorLogger(func(log Logger) {
+ log.Printf("Failed to fetch offsets for group %s: %v", cg.config.ID, err)
+ })
+ return memberID, err
+ }
+
+ // create the generation.
+ gen := Generation{
+ ID: generationID,
+ GroupID: cg.config.ID,
+ MemberID: memberID,
+ Assignments: cg.makeAssignments(assignments, offsets),
+ conn: conn,
+ done: make(chan struct{}),
+ joined: make(chan struct{}),
+ retentionMillis: int64(cg.config.RetentionTime / time.Millisecond),
+ log: cg.withLogger,
+ logError: cg.withErrorLogger,
+ }
+
+ // spawn all of the go routines required to facilitate this generation. if
+ // any of these functions exit, then the generation is determined to be
+ // complete.
+ gen.heartbeatLoop(cg.config.HeartbeatInterval)
+ if cg.config.WatchPartitionChanges {
+ for _, topic := range cg.config.Topics {
+ gen.partitionWatcher(cg.config.PartitionWatchInterval, topic)
+ }
+ }
+
+ // make this generation available for retrieval. if the CG is closed before
+ // we can send it on the channel, exit. that case is required b/c the next
+ // channel is unbuffered. if the caller to Next has already bailed because
+ // its own teardown logic has been invoked, this would deadlock otherwise.
+ select {
+ case <-cg.done:
+ gen.close()
+ return memberID, ErrGroupClosed // ErrGroupClosed will trigger leave logic.
+ case cg.next <- &gen:
+ }
+
+ // wait for generation to complete. if the CG is closed before the
+ // generation is finished, exit and leave the group.
+ select {
+ case <-cg.done:
+ gen.close()
+ return memberID, ErrGroupClosed // ErrGroupClosed will trigger leave logic.
+ case <-gen.done:
+ // time for next generation! make sure all the current go routines exit
+ // before continuing onward.
+ gen.close()
+ return memberID, nil
+ }
+}
+
+// connect returns a connection to ANY broker.
+func makeConnect(config ConsumerGroupConfig) func(dialer *Dialer, brokers ...string) (coordinator, error) {
+ return func(dialer *Dialer, brokers ...string) (coordinator, error) {
+ var err error
+ for _, broker := range brokers {
+ var conn *Conn
+ if conn, err = dialer.Dial("tcp", broker); err == nil {
+ return &timeoutCoordinator{
+ conn: conn,
+ timeout: config.Timeout,
+ sessionTimeout: config.SessionTimeout,
+ rebalanceTimeout: config.RebalanceTimeout,
+ }, nil
+ }
+ }
+ return nil, err // err will be non-nil
+ }
+}
+
+// coordinator establishes a connection to the coordinator for this consumer
+// group.
+func (cg *ConsumerGroup) coordinator() (coordinator, error) {
+ // NOTE : could try to cache the coordinator to avoid the double connect
+ // here. since consumer group balances happen infrequently and are
+ // an expensive operation, we're not currently optimizing that case
+ // in order to keep the code simpler.
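+ //
+ // the lookup below is two-step: dial any bootstrap broker, ask it for the
+ // group's coordinator, then dial the coordinator itself.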
+ conn, err := cg.config.connect(cg.config.Dialer, cg.config.Brokers...)
+ if err != nil {
+ return nil, err
+ }
+ defer conn.Close()
+
+ out, err := conn.findCoordinator(findCoordinatorRequestV0{
+ CoordinatorKey: cg.config.ID,
+ })
+ if err == nil && out.ErrorCode != 0 {
+ err = Error(out.ErrorCode)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ address := net.JoinHostPort(out.Coordinator.Host, strconv.Itoa(int(out.Coordinator.Port)))
+ return cg.config.connect(cg.config.Dialer, address)
+}
+
+// joinGroup attempts to join the reader to the consumer group.
+// Returns GroupMemberAssignments if this Reader was selected as
+// the leader. Otherwise, GroupMemberAssignments will be nil.
+//
+// Possible kafka error codes returned:
+// * GroupLoadInProgress:
+// * GroupCoordinatorNotAvailable:
+// * NotCoordinatorForGroup:
+// * InconsistentGroupProtocol:
+// * InvalidSessionTimeout:
+// * GroupAuthorizationFailed:
+func (cg *ConsumerGroup) joinGroup(conn coordinator, memberID string) (string, int32, GroupMemberAssignments, error) {
+ request, err := cg.makeJoinGroupRequestV1(memberID)
+ if err != nil {
+ return "", 0, nil, err
+ }
+
+ response, err := conn.joinGroup(request)
+ if err == nil && response.ErrorCode != 0 {
+ err = Error(response.ErrorCode)
+ }
+ if err != nil {
+ return "", 0, nil, err
+ }
+
+ memberID = response.MemberID
+ generationID := response.GenerationID
+
+ cg.withLogger(func(l Logger) {
+ l.Printf("joined group %s as member %s in generation %d", cg.config.ID, memberID, generationID)
+ })
+
+ var assignments GroupMemberAssignments
+ if iAmLeader := response.MemberID == response.LeaderID; iAmLeader {
+ v, err := cg.assignTopicPartitions(conn, response)
+ if err != nil {
+ return memberID, 0, nil, err
+ }
+ assignments = v
+
+ cg.withLogger(func(l Logger) {
+ for memberID, assignment := range assignments {
+ for topic, partitions := range assignment {
+ l.Printf("assigned member/topic/partitions %v/%v/%v", memberID, topic, partitions)
+ }
+ }
+ })
+ }
+
+ cg.withLogger(func(l Logger) {
+ l.Printf("joinGroup succeeded for response, %v. generationID=%v, memberID=%v", cg.config.ID, response.GenerationID, response.MemberID)
+ })
+
+ return memberID, generationID, assignments, nil
+}
+
+// makeJoinGroupRequestV1 handles the logic of constructing a joinGroup
+// request.
+func (cg *ConsumerGroup) makeJoinGroupRequestV1(memberID string) (joinGroupRequestV1, error) {
+ request := joinGroupRequestV1{
+ GroupID: cg.config.ID,
+ MemberID: memberID,
+ SessionTimeout: int32(cg.config.SessionTimeout / time.Millisecond),
+ RebalanceTimeout: int32(cg.config.RebalanceTimeout / time.Millisecond),
+ ProtocolType: defaultProtocolType,
+ }
+
+ for _, balancer := range cg.config.GroupBalancers {
+ userData, err := balancer.UserData()
+ if err != nil {
+ return joinGroupRequestV1{}, fmt.Errorf("unable to construct protocol metadata for member, %v: %w", balancer.ProtocolName(), err)
+ }
+ request.GroupProtocols = append(request.GroupProtocols, joinGroupRequestGroupProtocolV1{
+ ProtocolName: balancer.ProtocolName(),
+ ProtocolMetadata: groupMetadata{
+ Version: 1,
+ Topics: cg.config.Topics,
+ UserData: userData,
+ }.bytes(),
+ })
+ }
+
+ return request, nil
+}
+
+// assignTopicPartitions uses the selected GroupBalancer to assign members to
+// their various partitions.
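+//
+// This path only runs on the member elected leader by the coordinator;
+// followers receive their assignments from the syncGroup response instead.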
+func (cg *ConsumerGroup) assignTopicPartitions(conn coordinator, group joinGroupResponseV1) (GroupMemberAssignments, error) {
+ cg.withLogger(func(l Logger) {
+ l.Printf("selected as leader for group, %s\n", cg.config.ID)
+ })
+
+ balancer, ok := findGroupBalancer(group.GroupProtocol, cg.config.GroupBalancers)
+ if !ok {
+ // NOTE : this shouldn't happen in practice...the broker should not
+ // return successfully from joinGroup unless all members support
+ // at least one common protocol.
+ return nil, fmt.Errorf("unable to find selected balancer, %v, for group, %v", group.GroupProtocol, cg.config.ID)
+ }
+
+ members, err := cg.makeMemberProtocolMetadata(group.Members)
+ if err != nil {
+ return nil, err
+ }
+
+ topics := extractTopics(members)
+ partitions, err := conn.readPartitions(topics...)
+
+ // it's not a failure if the topic doesn't exist yet. it results in no
+ // assignments for the topic. this matches the behavior of the official
+ // clients: java, python, and librdkafka.
+ // a topic watcher can trigger a rebalance when the topic comes into being.
+ if err != nil && !errors.Is(err, UnknownTopicOrPartition) {
+ return nil, err
+ }
+
+ cg.withLogger(func(l Logger) {
+ l.Printf("using '%v' balancer to assign group, %v", group.GroupProtocol, cg.config.ID)
+ for _, member := range members {
+ l.Printf("found member: %v/%#v", member.ID, member.UserData)
+ }
+ for _, partition := range partitions {
+ l.Printf("found topic/partition: %v/%v", partition.Topic, partition.ID)
+ }
+ })
+
+ return balancer.AssignGroups(members, partitions), nil
+}
+
+// makeMemberProtocolMetadata maps encoded member metadata ([]byte) into []GroupMember.
+func (cg *ConsumerGroup) makeMemberProtocolMetadata(in []joinGroupResponseMemberV1) ([]GroupMember, error) {
+ members := make([]GroupMember, 0, len(in))
+ for _, item := range in {
+ metadata := groupMetadata{}
+ reader := bufio.NewReader(bytes.NewReader(item.MemberMetadata))
+ if remain, err := (&metadata).readFrom(reader, len(item.MemberMetadata)); err != nil || remain != 0 {
+ return nil, fmt.Errorf("unable to read metadata for member, %v: %w", item.MemberID, err)
+ }
+
+ members = append(members, GroupMember{
+ ID: item.MemberID,
+ Topics: metadata.Topics,
+ UserData: metadata.UserData,
+ })
+ }
+ return members, nil
+}
+
+// syncGroup completes the consumer group nextGeneration by accepting the
+// memberAssignments (if this Reader is the leader) and returning this
+// Reader's subscriptions, as topic => partitions.
+//
+// Possible kafka error codes returned:
+// * GroupCoordinatorNotAvailable:
+// * NotCoordinatorForGroup:
+// * IllegalGeneration:
+// * RebalanceInProgress:
+// * GroupAuthorizationFailed:
+func (cg *ConsumerGroup) syncGroup(conn coordinator, memberID string, generationID int32, memberAssignments GroupMemberAssignments) (map[string][]int32, error) {
+ request := cg.makeSyncGroupRequestV0(memberID, generationID, memberAssignments)
+ response, err := conn.syncGroup(request)
+ if err == nil && response.ErrorCode != 0 {
+ err = Error(response.ErrorCode)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ assignments := groupAssignment{}
+ reader := bufio.NewReader(bytes.NewReader(response.MemberAssignments))
+ if _, err := (&assignments).readFrom(reader, len(response.MemberAssignments)); err != nil {
+ return nil, err
+ }
+
+ if len(assignments.Topics) == 0 {
+ cg.withLogger(func(l Logger) {
+ l.Printf("received empty assignments for group, %v as member %s for generation %d", cg.config.ID, memberID, generationID)
+ })
+ }
+
+ cg.withLogger(func(l Logger) {
+ l.Printf("sync group finished for group, %v", cg.config.ID)
+ })
+
+ return assignments.Topics, nil
+}
+
+func (cg *ConsumerGroup) makeSyncGroupRequestV0(memberID string, generationID int32, memberAssignments GroupMemberAssignments) syncGroupRequestV0 {
+ request := syncGroupRequestV0{
+ GroupID: cg.config.ID,
+ GenerationID: generationID,
+ MemberID: memberID,
+ }
+
+ if memberAssignments != nil {
+ request.GroupAssignments = make([]syncGroupRequestGroupAssignmentV0, 0, 1)
+
+ for memberID, topics := range memberAssignments {
+ topics32 := make(map[string][]int32)
+ for topic, partitions := range topics {
+ partitions32 := make([]int32, len(partitions))
+ for i := range partitions {
+ partitions32[i] = int32(partitions[i])
+ }
+ topics32[topic] = partitions32
+ }
+ request.GroupAssignments = append(request.GroupAssignments, syncGroupRequestGroupAssignmentV0{
+ MemberID: memberID,
+ MemberAssignments: groupAssignment{
+ Version: 1,
+ Topics: topics32,
+ }.bytes(),
+ })
+ }
+
+ cg.withLogger(func(logger Logger) {
+ logger.Printf("Syncing %d assignments for generation %d as member %s", len(request.GroupAssignments), generationID, memberID)
+ })
+ }
+
+ return request
+}
+
+func (cg *ConsumerGroup) fetchOffsets(conn coordinator, subs map[string][]int32) (map[string]map[int]int64, error) {
+ req := offsetFetchRequestV1{
+ GroupID: cg.config.ID,
+ Topics: make([]offsetFetchRequestV1Topic, 0, len(cg.config.Topics)),
+ }
+ for _, topic := range cg.config.Topics {
+ req.Topics = append(req.Topics, offsetFetchRequestV1Topic{
+ Topic: topic,
+ Partitions: subs[topic],
+ })
+ }
+ offsets, err := conn.offsetFetch(req)
+ if err != nil {
+ return nil, err
+ }
+
+ offsetsByTopic := make(map[string]map[int]int64)
+ for _, res := range offsets.Responses {
+ offsetsByPartition := map[int]int64{}
+ offsetsByTopic[res.Topic] = offsetsByPartition
+ for _, pr := range res.PartitionResponses {
+ for _, partition := range subs[res.Topic] {
+ if partition == pr.Partition {
+ offset := pr.Offset
+ if offset < 0 {
+ offset = cg.config.StartOffset
+ }
+ offsetsByPartition[int(partition)] = offset
+ }
+ }
+ }
+ }
+
+ return offsetsByTopic, nil
+}
+
+func (cg *ConsumerGroup) makeAssignments(assignments map[string][]int32, offsets map[string]map[int]int64) map[string][]PartitionAssignment {
+ topicAssignments := make(map[string][]PartitionAssignment)
+ for _, topic := range cg.config.Topics {
+ topicPartitions := assignments[topic]
+ topicAssignments[topic] = make([]PartitionAssignment, 0, len(topicPartitions))
+ for _, partition := range topicPartitions {
+ var offset int64
+ partitionOffsets, ok := offsets[topic]
+ if ok {
+ offset, ok = partitionOffsets[int(partition)]
+ }
+ if !ok {
+ offset = cg.config.StartOffset
+ }
+ topicAssignments[topic] = append(topicAssignments[topic], PartitionAssignment{
+ ID: int(partition),
+ Offset: offset,
+ })
+ }
+ }
+ return topicAssignments
+}
+
+func (cg *ConsumerGroup) leaveGroup(memberID string) error {
+ // don't attempt to leave the group if no memberID was ever assigned.
+ if memberID == "" {
+ return nil
+ }
+
+ cg.withLogger(func(log Logger) {
+ log.Printf("Leaving group %s, member %s", cg.config.ID, memberID)
+ })
+
+ // IMPORTANT : leaveGroup establishes its own connection to the coordinator
+ // because it is often called after some other operation failed.
+ // said failure could be the result of connection-level issues,
+ // so we want to re-establish the connection to ensure that we
+ // are able to process the cleanup step.
+ coordinator, err := cg.coordinator()
+ if err != nil {
+ return err
+ }
+
+ _, err = coordinator.leaveGroup(leaveGroupRequestV0{
+ GroupID: cg.config.ID,
+ MemberID: memberID,
+ })
+ if err != nil {
+ cg.withErrorLogger(func(log Logger) {
+ log.Printf("leave group failed for group, %v, and member, %v: %v", cg.config.ID, memberID, err)
+ })
+ }
+
+ _ = coordinator.Close()
+
+ return err
+}
+
+func (cg *ConsumerGroup) withLogger(do func(Logger)) {
+ if cg.config.Logger != nil {
+ do(cg.config.Logger)
+ }
+}
+
+func (cg *ConsumerGroup) withErrorLogger(do func(Logger)) {
+ if cg.config.ErrorLogger != nil {
+ do(cg.config.ErrorLogger)
+ } else {
+ cg.withLogger(do)
+ }
+}
diff --git a/vendor/github.com/segmentio/kafka-go/crc32.go b/vendor/github.com/segmentio/kafka-go/crc32.go
new file mode 100644
index 00000000000..fef68342892
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/crc32.go
@@ -0,0 +1,55 @@
+package kafka
+
+import (
+ "encoding/binary"
+ "hash/crc32"
+)
+
+type crc32Writer struct {
+ table *crc32.Table
+ buffer [8]byte
+ crc32 uint32
+}
+
+func (w *crc32Writer) update(b []byte) {
+ w.crc32 = crc32.Update(w.crc32, w.table, b)
+}
+
+func (w *crc32Writer) writeInt8(i int8) {
+ w.buffer[0] = byte(i)
+ w.update(w.buffer[:1])
+}
+
+func (w *crc32Writer) writeInt16(i int16) {
+ binary.BigEndian.PutUint16(w.buffer[:2], uint16(i))
+ w.update(w.buffer[:2])
+}
+
+func (w *crc32Writer) writeInt32(i int32) {
+ binary.BigEndian.PutUint32(w.buffer[:4], uint32(i))
+ w.update(w.buffer[:4])
+}
+
+func (w *crc32Writer) writeInt64(i int64) {
+ binary.BigEndian.PutUint64(w.buffer[:8], uint64(i))
+ w.update(w.buffer[:8])
+}
+
+func (w *crc32Writer) writeBytes(b []byte) {
+ n := len(b)
+ if b == nil {
+ n = -1
+ }
+ w.writeInt32(int32(n))
+ w.update(b)
+}
+
+func (w *crc32Writer) Write(b []byte) (int, error) {
+ w.update(b)
+ return len(b), nil
+}
+
+func (w *crc32Writer) WriteString(s string) (int, error) {
+ w.update([]byte(s))
+ return len(s), nil
+}
diff --git a/vendor/github.com/segmentio/kafka-go/createacls.go b/vendor/github.com/segmentio/kafka-go/createacls.go
new file mode 100644
index 00000000000..672f6fdce91
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/createacls.go
@@ -0,0 +1,108 @@
+package kafka
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol/createacls"
+)
+
+// CreateACLsRequest represents a request sent to a kafka broker to add
+// new ACLs.
+type CreateACLsRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // List of ACLs to create.
+ ACLs []ACLEntry
+}
+
+// CreateACLsResponse represents a response from a kafka broker to an ACL
+// creation request.
+type CreateACLsResponse struct {
+ // The amount of time that the broker throttled the request.
+ Throttle time.Duration
+
+ // List of errors that occurred while attempting to create
+ // the ACLs.
+ //
+ // The errors contain the kafka error code. Programs may use the standard
+ // errors.Is function to test the error against kafka error codes.
+ Errors []error
+}
+
+type ACLPermissionType int8
+
+const (
+ ACLPermissionTypeUnknown ACLPermissionType = 0
+ ACLPermissionTypeAny ACLPermissionType = 1
+ ACLPermissionTypeDeny ACLPermissionType = 2
+ ACLPermissionTypeAllow ACLPermissionType = 3
+)
+
+type ACLOperationType int8
+
+const (
+ ACLOperationTypeUnknown ACLOperationType = 0
+ ACLOperationTypeAny ACLOperationType = 1
+ ACLOperationTypeAll ACLOperationType = 2
+ ACLOperationTypeRead ACLOperationType = 3
+ ACLOperationTypeWrite ACLOperationType = 4
+ ACLOperationTypeCreate ACLOperationType = 5
+ ACLOperationTypeDelete ACLOperationType = 6
+ ACLOperationTypeAlter ACLOperationType = 7
+ ACLOperationTypeDescribe ACLOperationType = 8
+ ACLOperationTypeClusterAction ACLOperationType = 9
+ ACLOperationTypeDescribeConfigs ACLOperationType = 10
+ ACLOperationTypeAlterConfigs ACLOperationType = 11
+ ACLOperationTypeIdempotentWrite ACLOperationType = 12
+)
+
+type ACLEntry struct {
+ ResourceType ResourceType
+ ResourceName string
+ ResourcePatternType PatternType
+ Principal string
+ Host string
+ Operation ACLOperationType
+ PermissionType ACLPermissionType
+}
+
+// CreateACLs sends an ACL creation request to a kafka broker and returns the
+// response.
+func (c *Client) CreateACLs(ctx context.Context, req *CreateACLsRequest) (*CreateACLsResponse, error) {
+ acls := make([]createacls.RequestACLs, 0, len(req.ACLs))
+
+ for _, acl := range req.ACLs {
+ acls = append(acls, createacls.RequestACLs{
+ ResourceType: int8(acl.ResourceType),
+ ResourceName: acl.ResourceName,
+ ResourcePatternType: int8(acl.ResourcePatternType),
+ Principal: acl.Principal,
+ Host: acl.Host,
+ Operation: int8(acl.Operation),
+ PermissionType: int8(acl.PermissionType),
+ })
+ }
+
+ m, err := c.roundTrip(ctx, req.Addr, &createacls.Request{
+ Creations: acls,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("kafka.(*Client).CreateACLs: %w", err)
+ }
+
+ res := m.(*createacls.Response)
+ ret := &CreateACLsResponse{
+ Throttle: makeDuration(res.ThrottleTimeMs),
+ Errors: make([]error, 0, len(res.Results)),
+ }
+
+ for _, t := range res.Results {
+ ret.Errors = append(ret.Errors, makeError(t.ErrorCode, t.ErrorMessage))
+ }
+
+ return ret, nil
+}
diff --git a/vendor/github.com/segmentio/kafka-go/createpartitions.go b/vendor/github.com/segmentio/kafka-go/createpartitions.go
new file mode 100644
index 00000000000..d4c0d0e703e
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/createpartitions.go
@@ -0,0 +1,103 @@
+package kafka
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol/createpartitions"
+)
+
+// CreatePartitionsRequest represents a request sent to a kafka broker to create
+// and update topic partitions.
+type CreatePartitionsRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // List of topics to create and their configuration.
+ Topics []TopicPartitionsConfig
+
+ // When set to true, topics are not created but the configuration is
+ // validated as if they were.
+ ValidateOnly bool
+}
+
+// CreatePartitionsResponse represents a response from a kafka broker to a partition
+// creation request.
+type CreatePartitionsResponse struct {
+ // The amount of time that the broker throttled the request.
+ Throttle time.Duration
+
+ // Mapping of topic names to errors that occurred while attempting to create
+ // the topics.
+ //
+ // The errors contain the kafka error code. Programs may use the standard
+ // errors.Is function to test the error against kafka error codes.
+ Errors map[string]error
+}
+
+// CreatePartitions sends a partition creation request to a kafka broker and returns the
+// response.
+func (c *Client) CreatePartitions(ctx context.Context, req *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
+ topics := make([]createpartitions.RequestTopic, len(req.Topics))
+
+ for i, t := range req.Topics {
+ topics[i] = createpartitions.RequestTopic{
+ Name: t.Name,
+ Count: t.Count,
+ Assignments: t.assignments(),
+ }
+ }
+
+ m, err := c.roundTrip(ctx, req.Addr, &createpartitions.Request{
+ Topics: topics,
+ TimeoutMs: c.timeoutMs(ctx, defaultCreatePartitionsTimeout),
+ ValidateOnly: req.ValidateOnly,
+ })
+
+ if err != nil {
+ return nil, fmt.Errorf("kafka.(*Client).CreatePartitions: %w", err)
+ }
+
+ res := m.(*createpartitions.Response)
+ ret := &CreatePartitionsResponse{
+ Throttle: makeDuration(res.ThrottleTimeMs),
+ Errors: make(map[string]error, len(res.Results)),
+ }
+
+ for _, t := range res.Results {
+ ret.Errors[t.Name] = makeError(t.ErrorCode, t.ErrorMessage)
+ }
+
+ return ret, nil
+}
+
+type TopicPartitionsConfig struct {
+ // Topic name
+ Name string
+
+ // Topic partition's count.
+ Count int32
+
+ // TopicPartitionAssignments among kafka brokers for this topic partitions.
+ TopicPartitionAssignments []TopicPartitionAssignment
+}
+
+func (t *TopicPartitionsConfig) assignments() []createpartitions.RequestAssignment {
+ if len(t.TopicPartitionAssignments) == 0 {
+ return nil
+ }
+ assignments := make([]createpartitions.RequestAssignment, len(t.TopicPartitionAssignments))
+ for i, a := range t.TopicPartitionAssignments {
+ assignments[i] = createpartitions.RequestAssignment{
+ BrokerIDs: a.BrokerIDs,
+ }
+ }
+ return assignments
+}
+
+type TopicPartitionAssignment struct {
+ // Broker IDs
+ BrokerIDs []int32
+}
diff --git a/vendor/github.com/segmentio/kafka-go/createtopics.go b/vendor/github.com/segmentio/kafka-go/createtopics.go
new file mode 100644
index 00000000000..8ad9ebf441a
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/createtopics.go
@@ -0,0 +1,390 @@
+package kafka
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol/createtopics"
+)
+
+// CreateTopicsRequest represents a request sent to a kafka broker to create
+// new topics.
+type CreateTopicsRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // List of topics to create and their configuration.
+ Topics []TopicConfig
+
+ // When set to true, topics are not created but the configuration is
+ // validated as if they were.
+ //
+ // This field will be ignored if the kafka broker did not support the
+ // CreateTopics API in version 1 or above.
+ ValidateOnly bool
+}
+
+// CreateTopicsResponse represents a response from a kafka broker to a topic
+// creation request.
+type CreateTopicsResponse struct {
+ // The amount of time that the broker throttled the request.
+ //
+ // This field will be zero if the kafka broker did not support the
+ // CreateTopics API in version 2 or above.
+ Throttle time.Duration
+
+ // Mapping of topic names to errors that occurred while attempting to create
+ // the topics.
+ //
+ // The errors contain the kafka error code. Programs may use the standard
+ // errors.Is function to test the error against kafka error codes.
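+ //
+ // A sketch of a typical check (treating "already exists" as success is a
+ // common but application-specific choice):
+ //
+ // for topic, err := range resp.Errors {
+ // if err != nil && !errors.Is(err, TopicAlreadyExists) {
+ // // surface the failure for this topic
+ // }
+ // }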
+ Errors map[string]error
+}
+
+// CreateTopics sends a topic creation request to a kafka broker and returns the
+// response.
+func (c *Client) CreateTopics(ctx context.Context, req *CreateTopicsRequest) (*CreateTopicsResponse, error) {
+ topics := make([]createtopics.RequestTopic, len(req.Topics))
+
+ for i, t := range req.Topics {
+ topics[i] = createtopics.RequestTopic{
+ Name: t.Topic,
+ NumPartitions: int32(t.NumPartitions),
+ ReplicationFactor: int16(t.ReplicationFactor),
+ Assignments: t.assignments(),
+ Configs: t.configs(),
+ }
+ }
+
+ m, err := c.roundTrip(ctx, req.Addr, &createtopics.Request{
+ Topics: topics,
+ TimeoutMs: c.timeoutMs(ctx, defaultCreateTopicsTimeout),
+ ValidateOnly: req.ValidateOnly,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("kafka.(*Client).CreateTopics: %w", err)
+ }
+
+ res := m.(*createtopics.Response)
+ ret := &CreateTopicsResponse{
+ Throttle: makeDuration(res.ThrottleTimeMs),
+ Errors: make(map[string]error, len(res.Topics)),
+ }
+
+ for _, t := range res.Topics {
+ ret.Errors[t.Name] = makeError(t.ErrorCode, t.ErrorMessage)
+ }
+
+ return ret, nil
+}
+
+type ConfigEntry struct {
+ ConfigName string
+ ConfigValue string
+}
+
+func (c ConfigEntry) toCreateTopicsRequestV0ConfigEntry() createTopicsRequestV0ConfigEntry {
+ return createTopicsRequestV0ConfigEntry(c)
+}
+
+type createTopicsRequestV0ConfigEntry struct {
+ ConfigName string
+ ConfigValue string
+}
+
+func (t createTopicsRequestV0ConfigEntry) size() int32 {
+ return sizeofString(t.ConfigName) +
+ sizeofString(t.ConfigValue)
+}
+
+func (t createTopicsRequestV0ConfigEntry) writeTo(wb *writeBuffer) {
+ wb.writeString(t.ConfigName)
+ wb.writeString(t.ConfigValue)
+}
+
+type ReplicaAssignment struct {
+ Partition int
+ // The list of brokers where the partition should be allocated. There must
+ // be as many entries in this list as there are replicas of the partition.
+ // The first entry represents the broker that will be the preferred leader
+ // for the partition.
+ //
+ // This field changed in 0.4 from `int` to `[]int`. It was invalid to pass
+ // a single integer as this is supposed to be a list. While this introduces
+ // a breaking change, it probably never worked before.
+ Replicas []int
+}
+
+func (a *ReplicaAssignment) partitionIndex() int32 {
+ return int32(a.Partition)
+}
+
+func (a *ReplicaAssignment) brokerIDs() []int32 {
+ if len(a.Replicas) == 0 {
+ return nil
+ }
+ replicas := make([]int32, len(a.Replicas))
+ for i, r := range a.Replicas {
+ replicas[i] = int32(r)
+ }
+ return replicas
+}
+
+func (a ReplicaAssignment) toCreateTopicsRequestV0ReplicaAssignment() createTopicsRequestV0ReplicaAssignment {
+ return createTopicsRequestV0ReplicaAssignment{
+ Partition: int32(a.Partition),
+ Replicas: a.brokerIDs(),
+ }
+}
+
+type createTopicsRequestV0ReplicaAssignment struct {
+ Partition int32
+ Replicas []int32
+}
+
+func (t createTopicsRequestV0ReplicaAssignment) size() int32 {
+ return sizeofInt32(t.Partition) +
+ (int32(len(t.Replicas)+1) * sizeofInt32(0)) // N+1 because the array length is an int32
+}
+
+func (t createTopicsRequestV0ReplicaAssignment) writeTo(wb *writeBuffer) {
+ wb.writeInt32(t.Partition)
+ wb.writeInt32(int32(len(t.Replicas)))
+ for _, r := range t.Replicas {
+ wb.writeInt32(int32(r))
+ }
+}
+
+type TopicConfig struct {
+ // Topic name
+ Topic string
+
+ // NumPartitions created. -1 indicates unset.
+ NumPartitions int
+
+ // ReplicationFactor for the topic. -1 indicates unset.
+ ReplicationFactor int + + // ReplicaAssignments among kafka brokers for this topic's partitions. If this + // is set, num_partitions and replication_factor must be unset. + ReplicaAssignments []ReplicaAssignment + + // ConfigEntries holds topic level configuration for topic to be set. + ConfigEntries []ConfigEntry +} + +func (t *TopicConfig) assignments() []createtopics.RequestAssignment { + if len(t.ReplicaAssignments) == 0 { + return nil + } + assignments := make([]createtopics.RequestAssignment, len(t.ReplicaAssignments)) + for i, a := range t.ReplicaAssignments { + assignments[i] = createtopics.RequestAssignment{ + PartitionIndex: a.partitionIndex(), + BrokerIDs: a.brokerIDs(), + } + } + return assignments +} + +func (t *TopicConfig) configs() []createtopics.RequestConfig { + if len(t.ConfigEntries) == 0 { + return nil + } + configs := make([]createtopics.RequestConfig, len(t.ConfigEntries)) + for i, c := range t.ConfigEntries { + configs[i] = createtopics.RequestConfig{ + Name: c.ConfigName, + Value: c.ConfigValue, + } + } + return configs +} + +func (t TopicConfig) toCreateTopicsRequestV0Topic() createTopicsRequestV0Topic { + requestV0ReplicaAssignments := make([]createTopicsRequestV0ReplicaAssignment, 0, len(t.ReplicaAssignments)) + for _, a := range t.ReplicaAssignments { + requestV0ReplicaAssignments = append( + requestV0ReplicaAssignments, + a.toCreateTopicsRequestV0ReplicaAssignment()) + } + requestV0ConfigEntries := make([]createTopicsRequestV0ConfigEntry, 0, len(t.ConfigEntries)) + for _, c := range t.ConfigEntries { + requestV0ConfigEntries = append( + requestV0ConfigEntries, + c.toCreateTopicsRequestV0ConfigEntry()) + } + + return createTopicsRequestV0Topic{ + Topic: t.Topic, + NumPartitions: int32(t.NumPartitions), + ReplicationFactor: int16(t.ReplicationFactor), + ReplicaAssignments: requestV0ReplicaAssignments, + ConfigEntries: requestV0ConfigEntries, + } +} + +type createTopicsRequestV0Topic struct { + // Topic name + Topic string + + // NumPartitions created. -1 indicates unset. + NumPartitions int32 + + // ReplicationFactor for the topic. -1 indicates unset. + ReplicationFactor int16 + + // ReplicaAssignments among kafka brokers for this topic's partitions. If this + // is set, num_partitions and replication_factor must be unset. + ReplicaAssignments []createTopicsRequestV0ReplicaAssignment + + // ConfigEntries holds topic level configuration for topic to be set. + ConfigEntries []createTopicsRequestV0ConfigEntry +} + +func (t createTopicsRequestV0Topic) size() int32 { + return sizeofString(t.Topic) + + sizeofInt32(t.NumPartitions) + + sizeofInt16(t.ReplicationFactor) + + sizeofArray(len(t.ReplicaAssignments), func(i int) int32 { return t.ReplicaAssignments[i].size() }) + + sizeofArray(len(t.ConfigEntries), func(i int) int32 { return t.ConfigEntries[i].size() }) +} + +func (t createTopicsRequestV0Topic) writeTo(wb *writeBuffer) { + wb.writeString(t.Topic) + wb.writeInt32(t.NumPartitions) + wb.writeInt16(t.ReplicationFactor) + wb.writeArray(len(t.ReplicaAssignments), func(i int) { t.ReplicaAssignments[i].writeTo(wb) }) + wb.writeArray(len(t.ConfigEntries), func(i int) { t.ConfigEntries[i].writeTo(wb) }) +} + +// See http://kafka.apache.org/protocol.html#The_Messages_CreateTopics +type createTopicsRequestV0 struct { + // Topics contains an array of single topic creation requests. It cannot + // have multiple entries for the same topic. + Topics []createTopicsRequestV0Topic + + // Timeout ms to wait for a topic to be completely created on the + // controller node.
Values <= 0 will trigger topic creation and return immediately. + Timeout int32 +} + +func (t createTopicsRequestV0) size() int32 { + return sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() }) + + sizeofInt32(t.Timeout) +} + +func (t createTopicsRequestV0) writeTo(wb *writeBuffer) { + wb.writeArray(len(t.Topics), func(i int) { t.Topics[i].writeTo(wb) }) + wb.writeInt32(t.Timeout) +} + +type createTopicsResponseV0TopicError struct { + // Topic name + Topic string + + // ErrorCode holds response error code + ErrorCode int16 +} + +func (t createTopicsResponseV0TopicError) size() int32 { + return sizeofString(t.Topic) + + sizeofInt16(t.ErrorCode) +} + +func (t createTopicsResponseV0TopicError) writeTo(wb *writeBuffer) { + wb.writeString(t.Topic) + wb.writeInt16(t.ErrorCode) +} + +func (t *createTopicsResponseV0TopicError) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readString(r, size, &t.Topic); err != nil { + return + } + if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { + return + } + return +} + +// See http://kafka.apache.org/protocol.html#The_Messages_CreateTopics +type createTopicsResponseV0 struct { + TopicErrors []createTopicsResponseV0TopicError +} + +func (t createTopicsResponseV0) size() int32 { + return sizeofArray(len(t.TopicErrors), func(i int) int32 { return t.TopicErrors[i].size() }) +} + +func (t createTopicsResponseV0) writeTo(wb *writeBuffer) { + wb.writeArray(len(t.TopicErrors), func(i int) { t.TopicErrors[i].writeTo(wb) }) +} + +func (t *createTopicsResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { + fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { + var topic createTopicsResponseV0TopicError + if fnRemain, fnErr = (&topic).readFrom(r, size); fnErr != nil { + return + } + t.TopicErrors = append(t.TopicErrors, topic) + return + } + if remain, err = readArrayWith(r, size, fn); err != nil { + return + } + + return +} + +func (c *Conn) createTopics(request createTopicsRequestV0) (createTopicsResponseV0, error) { + var response createTopicsResponseV0 + + err := c.writeOperation( + func(deadline time.Time, id int32) error { + if request.Timeout == 0 { + now := time.Now() + deadline = adjustDeadlineForRTT(deadline, now, defaultRTT) + request.Timeout = milliseconds(deadlineToTimeout(deadline, now)) + } + return c.writeRequest(createTopics, v0, id, request) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return response, err + } + for _, tr := range response.TopicErrors { + if tr.ErrorCode == int16(TopicAlreadyExists) { + continue + } + if tr.ErrorCode != 0 { + return response, Error(tr.ErrorCode) + } + } + + return response, nil +} + +// CreateTopics creates one topic per provided configuration with idempotent +// operational semantics. In other words, if CreateTopics is invoked with a +// configuration for an existing topic, it will have no effect.
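// [Editorial aside; not part of the vendored source] A sketch of the
// connection-level CreateTopics API that follows, using the package-level
// kafka.Dial helper defined later in dialer.go; the address and topic are
// assumptions for the example:
//
//	conn, err := kafka.Dial("tcp", "localhost:9092")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer conn.Close()
//
//	// Idempotent: re-creating an existing topic is not an error.
//	if err := conn.CreateTopics(kafka.TopicConfig{
//		Topic:             "example-topic",
//		NumPartitions:     3,
//		ReplicationFactor: 1,
//	}); err != nil {
//		log.Fatal(err)
//	}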
+func (c *Conn) CreateTopics(topics ...TopicConfig) error { + requestV0Topics := make([]createTopicsRequestV0Topic, 0, len(topics)) + for _, t := range topics { + requestV0Topics = append( + requestV0Topics, + t.toCreateTopicsRequestV0Topic()) + } + + _, err := c.createTopics(createTopicsRequestV0{ + Topics: requestV0Topics, + }) + return err +} diff --git a/vendor/github.com/segmentio/kafka-go/deletegroups.go b/vendor/github.com/segmentio/kafka-go/deletegroups.go new file mode 100644 index 00000000000..6317ae7fa5e --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/deletegroups.go @@ -0,0 +1,60 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/deletegroups" +) + +// DeleteGroupsRequest represents a request sent to a kafka broker to delete +// consumer groups. +type DeleteGroupsRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // Identifiers of groups to delete. + GroupIDs []string +} + +// DeleteGroupsResponse represents a response from a kafka broker to a consumer group +// deletion request. +type DeleteGroupsResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // Mapping of group ids to errors that occurred while attempting to delete those groups. + // + // The errors contain the kafka error code. Programs may use the standard + // errors.Is function to test the error against kafka error codes. + Errors map[string]error +} + +// DeleteGroups sends a delete groups request and returns the response. The request is sent to the group coordinator of the first group +// of the request. All deleted groups must be managed by the same group coordinator. +func (c *Client) DeleteGroups( + ctx context.Context, + req *DeleteGroupsRequest, +) (*DeleteGroupsResponse, error) { + m, err := c.roundTrip(ctx, req.Addr, &deletegroups.Request{ + GroupIDs: req.GroupIDs, + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).DeleteGroups: %w", err) + } + + r := m.(*deletegroups.Response) + + ret := &DeleteGroupsResponse{ + Throttle: makeDuration(r.ThrottleTimeMs), + Errors: make(map[string]error, len(r.Responses)), + } + + for _, t := range r.Responses { + ret.Errors[t.GroupID] = makeError(t.ErrorCode, "") + } + + return ret, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/deletetopics.go b/vendor/github.com/segmentio/kafka-go/deletetopics.go new file mode 100644 index 00000000000..d758d9fd6a4 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/deletetopics.go @@ -0,0 +1,175 @@ +package kafka + +import ( + "bufio" + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/deletetopics" +) + +// DeleteTopicsRequest represents a request sent to a kafka broker to delete +// topics. +type DeleteTopicsRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // Names of topics to delete. + Topics []string +} + +// DeleteTopicsResponse represents a response from a kafka broker to a topic +// deletion request. +type DeleteTopicsResponse struct { + // The amount of time that the broker throttled the request. + // + // This field will be zero if the kafka broker did not support the + // DeleteTopics API in version 1 or above. + Throttle time.Duration + + // Mapping of topic names to errors that occurred while attempting to delete + // the topics. + // + // The errors contain the kafka error code. 
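// [Editorial aside; not part of the vendored source] A usage sketch for the
// Client.DeleteGroups API above. As its doc comment notes, the request goes to
// the group coordinator of the first group, so all listed groups must share a
// coordinator; the group id is a hypothetical example:
//
//	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
//	resp, err := client.DeleteGroups(context.Background(), &kafka.DeleteGroupsRequest{
//		GroupIDs: []string{"example-consumer-group"},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	if groupErr := resp.Errors["example-consumer-group"]; groupErr != nil {
//		log.Printf("delete group: %v", groupErr)
//	}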
Programs may use the standard + // errors.Is function to test the error against kafka error codes. + Errors map[string]error +} + +// DeleteTopics sends a topic deletion request to a kafka broker and returns the +// response. +func (c *Client) DeleteTopics(ctx context.Context, req *DeleteTopicsRequest) (*DeleteTopicsResponse, error) { + m, err := c.roundTrip(ctx, req.Addr, &deletetopics.Request{ + TopicNames: req.Topics, + TimeoutMs: c.timeoutMs(ctx, defaultDeleteTopicsTimeout), + }) + + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).DeleteTopics: %w", err) + } + + res := m.(*deletetopics.Response) + ret := &DeleteTopicsResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Errors: make(map[string]error, len(res.Responses)), + } + + for _, t := range res.Responses { + if t.ErrorCode == 0 { + ret.Errors[t.Name] = nil + } else { + ret.Errors[t.Name] = Error(t.ErrorCode) + } + } + + return ret, nil +} + +// See http://kafka.apache.org/protocol.html#The_Messages_DeleteTopics +type deleteTopicsRequestV0 struct { + // Topics holds the topic names + Topics []string + + // Timeout holds the time in ms to wait for a topic to be completely deleted + // on the controller node. Values <= 0 will trigger topic deletion and return + // immediately. + Timeout int32 +} + +func (t deleteTopicsRequestV0) size() int32 { + return sizeofStringArray(t.Topics) + + sizeofInt32(t.Timeout) +} + +func (t deleteTopicsRequestV0) writeTo(wb *writeBuffer) { + wb.writeStringArray(t.Topics) + wb.writeInt32(t.Timeout) +} + +type deleteTopicsResponseV0 struct { + // TopicErrorCodes holds per topic error codes + TopicErrorCodes []deleteTopicsResponseV0TopicErrorCode +} + +func (t deleteTopicsResponseV0) size() int32 { + return sizeofArray(len(t.TopicErrorCodes), func(i int) int32 { return t.TopicErrorCodes[i].size() }) +} + +func (t *deleteTopicsResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { + fn := func(withReader *bufio.Reader, withSize int) (fnRemain int, fnErr error) { + var item deleteTopicsResponseV0TopicErrorCode + if fnRemain, fnErr = (&item).readFrom(withReader, withSize); fnErr != nil { + return + } + t.TopicErrorCodes = append(t.TopicErrorCodes, item) + return + } + if remain, err = readArrayWith(r, size, fn); err != nil { + return + } + return +} + +func (t deleteTopicsResponseV0) writeTo(wb *writeBuffer) { + wb.writeArray(len(t.TopicErrorCodes), func(i int) { t.TopicErrorCodes[i].writeTo(wb) }) +} + +type deleteTopicsResponseV0TopicErrorCode struct { + // Topic holds the topic name + Topic string + + // ErrorCode holds the error code + ErrorCode int16 +} + +func (t deleteTopicsResponseV0TopicErrorCode) size() int32 { + return sizeofString(t.Topic) + + sizeofInt16(t.ErrorCode) +} + +func (t *deleteTopicsResponseV0TopicErrorCode) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readString(r, size, &t.Topic); err != nil { + return + } + if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { + return + } + return +} + +func (t deleteTopicsResponseV0TopicErrorCode) writeTo(wb *writeBuffer) { + wb.writeString(t.Topic) + wb.writeInt16(t.ErrorCode) +} + +// deleteTopics deletes the specified topics.
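// [Editorial aside; not part of the vendored source] A usage sketch for the
// Client.DeleteTopics API above; the broker address and topic name are
// assumptions for the example:
//
//	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
//	resp, err := client.DeleteTopics(context.Background(), &kafka.DeleteTopicsRequest{
//		Topics: []string{"example-topic"},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	if topicErr := resp.Errors["example-topic"]; topicErr != nil {
//		log.Printf("delete topic: %v", topicErr)
//	}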
+// +// See http://kafka.apache.org/protocol.html#The_Messages_DeleteTopics +func (c *Conn) deleteTopics(request deleteTopicsRequestV0) (deleteTopicsResponseV0, error) { + var response deleteTopicsResponseV0 + err := c.writeOperation( + func(deadline time.Time, id int32) error { + if request.Timeout == 0 { + now := time.Now() + deadline = adjustDeadlineForRTT(deadline, now, defaultRTT) + request.Timeout = milliseconds(deadlineToTimeout(deadline, now)) + } + return c.writeRequest(deleteTopics, v0, id, request) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err != nil { + return deleteTopicsResponseV0{}, err + } + for _, c := range response.TopicErrorCodes { + if c.ErrorCode != 0 { + return response, Error(c.ErrorCode) + } + } + return response, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/describeclientquotas.go b/vendor/github.com/segmentio/kafka-go/describeclientquotas.go new file mode 100644 index 00000000000..6291dcd986f --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/describeclientquotas.go @@ -0,0 +1,126 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/describeclientquotas" +) + +// DescribeClientQuotasRequest represents a request sent to a kafka broker to +// describe client quotas. +type DescribeClientQuotasRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // List of quota components to describe. + Components []DescribeClientQuotasRequestComponent + + // Whether the match is strict, i.e. should exclude entities with + // unspecified entity types. + Strict bool +} + +type DescribeClientQuotasRequestComponent struct { + // The entity type that the filter component applies to. + EntityType string + + // How to match the entity (0 = exact name, 1 = default name, + // 2 = any specified name). + MatchType int8 + + // The string to match against, or null if unused for the match type. + Match string +} + +// DescribeClientQuotasResponse represents a response from a kafka broker to a describe client quota request. +type DescribeClientQuotasResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // Error is set to a non-nil value including the code and message if a top-level + // error was encountered when describing the client quotas. + Error error + + // List of describe client quota responses. + Entries []DescribeClientQuotasResponseQuotas +} + +type DescribeClientQuotasEntity struct { + // The quota entity type. + EntityType string + + // The name of the quota entity, or null if the default. + EntityName string +} + +type DescribeClientQuotasValue struct { + // The quota configuration key. + Key string + + // The quota configuration value. + Value float64 +} + +type DescribeClientQuotasResponseQuotas struct { + // List of client quota entities and their descriptions. + Entities []DescribeClientQuotasEntity + + // The client quota configuration values. + Values []DescribeClientQuotasValue +} + +// DescribeClientQuotas sends a describe client quotas request to a kafka broker and returns +// the response.
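// [Editorial aside; not part of the vendored source] A usage sketch for the
// DescribeClientQuotas API that follows. The "client-id" entity type and the
// exact-name match (MatchType 0, per the comment above) follow the Kafka quota
// model; the broker address and client id are hypothetical:
//
//	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
//	resp, err := client.DescribeClientQuotas(context.Background(), &kafka.DescribeClientQuotasRequest{
//		Components: []kafka.DescribeClientQuotasRequestComponent{{
//			EntityType: "client-id",
//			MatchType:  0, // exact name
//			Match:      "example-client",
//		}},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, entry := range resp.Entries {
//		for _, v := range entry.Values {
//			log.Printf("%s = %f", v.Key, v.Value)
//		}
//	}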
+func (c *Client) DescribeClientQuotas(ctx context.Context, req *DescribeClientQuotasRequest) (*DescribeClientQuotasResponse, error) { + components := make([]describeclientquotas.Component, len(req.Components)) + + for componentIdx, component := range req.Components { + components[componentIdx] = describeclientquotas.Component{ + EntityType: component.EntityType, + MatchType: component.MatchType, + Match: component.Match, + } + } + + m, err := c.roundTrip(ctx, req.Addr, &describeclientquotas.Request{ + Components: components, + Strict: req.Strict, + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).DescribeClientQuotas: %w", err) + } + + res := m.(*describeclientquotas.Response) + responseEntries := make([]DescribeClientQuotasResponseQuotas, len(res.Entries)) + + for responseEntryIdx, responseEntry := range res.Entries { + responseEntities := make([]DescribeClientQuotasEntity, len(responseEntry.Entities)) + for responseEntityIdx, responseEntity := range responseEntry.Entities { + responseEntities[responseEntityIdx] = DescribeClientQuotasEntity{ + EntityType: responseEntity.EntityType, + EntityName: responseEntity.EntityName, + } + } + + responseValues := make([]DescribeClientQuotasValue, len(responseEntry.Values)) + for responseValueIdx, responseValue := range responseEntry.Values { + responseValues[responseValueIdx] = DescribeClientQuotasValue{ + Key: responseValue.Key, + Value: responseValue.Value, + } + } + responseEntries[responseEntryIdx] = DescribeClientQuotasResponseQuotas{ + Entities: responseEntities, + Values: responseValues, + } + } + ret := &DescribeClientQuotasResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Entries: responseEntries, + } + + return ret, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/describeconfigs.go b/vendor/github.com/segmentio/kafka-go/describeconfigs.go new file mode 100644 index 00000000000..17f4f305fdf --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/describeconfigs.go @@ -0,0 +1,162 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/describeconfigs" +) + +// DescribeConfigsRequest represents a request sent to a kafka broker to describe configs. +type DescribeConfigsRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // List of resources to get details for. + Resources []DescribeConfigRequestResource + + // Ignored if API version is less than v1 + IncludeSynonyms bool + + // Ignored if API version is less than v3 + IncludeDocumentation bool +} + +type DescribeConfigRequestResource struct { + // Resource Type + ResourceType ResourceType + + // Resource Name + ResourceName string + + // ConfigNames is a list of configurations to describe. + ConfigNames []string +} + +// DescribeConfigsResponse represents a response from a kafka broker to a describe config request. +type DescribeConfigsResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // Resources + Resources []DescribeConfigResponseResource +} + +// DescribeConfigResponseResource. +type DescribeConfigResponseResource struct { + // Resource Type + ResourceType int8 + + // Resource Name + ResourceName string + + // Error + Error error + + // ConfigEntries + ConfigEntries []DescribeConfigResponseConfigEntry +} + +// DescribeConfigResponseConfigEntry.
+type DescribeConfigResponseConfigEntry struct { + ConfigName string + ConfigValue string + ReadOnly bool + + // Ignored if API version is greater than v0 + IsDefault bool + + // Ignored if API version is less than v1 + ConfigSource int8 + + IsSensitive bool + + // Ignored if API version is less than v1 + ConfigSynonyms []DescribeConfigResponseConfigSynonym + + // Ignored if API version is less than v3 + ConfigType int8 + + // Ignored if API version is less than v3 + ConfigDocumentation string +} + +// DescribeConfigResponseConfigSynonym. +type DescribeConfigResponseConfigSynonym struct { + // Ignored if API version is less than v1 + ConfigName string + + // Ignored if API version is less than v1 + ConfigValue string + + // Ignored if API version is less than v1 + ConfigSource int8 +} + +// DescribeConfigs sends a describe configs request to a kafka broker and returns the +// response. +func (c *Client) DescribeConfigs(ctx context.Context, req *DescribeConfigsRequest) (*DescribeConfigsResponse, error) { + resources := make([]describeconfigs.RequestResource, len(req.Resources)) + + for i, t := range req.Resources { + resources[i] = describeconfigs.RequestResource{ + ResourceType: int8(t.ResourceType), + ResourceName: t.ResourceName, + ConfigNames: t.ConfigNames, + } + } + + m, err := c.roundTrip(ctx, req.Addr, &describeconfigs.Request{ + Resources: resources, + IncludeSynonyms: req.IncludeSynonyms, + IncludeDocumentation: req.IncludeDocumentation, + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).DescribeConfigs: %w", err) + } + + res := m.(*describeconfigs.Response) + ret := &DescribeConfigsResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Resources: make([]DescribeConfigResponseResource, len(res.Resources)), + } + + for i, t := range res.Resources { + + configEntries := make([]DescribeConfigResponseConfigEntry, len(t.ConfigEntries)) + for j, v := range t.ConfigEntries { + + configSynonyms := make([]DescribeConfigResponseConfigSynonym, len(v.ConfigSynonyms)) + for k, cs := range v.ConfigSynonyms { + configSynonyms[k] = DescribeConfigResponseConfigSynonym{ + ConfigName: cs.ConfigName, + ConfigValue: cs.ConfigValue, + ConfigSource: cs.ConfigSource, + } + } + + configEntries[j] = DescribeConfigResponseConfigEntry{ + ConfigName: v.ConfigName, + ConfigValue: v.ConfigValue, + ReadOnly: v.ReadOnly, + ConfigSource: v.ConfigSource, + IsDefault: v.IsDefault, + IsSensitive: v.IsSensitive, + ConfigSynonyms: configSynonyms, + ConfigType: v.ConfigType, + ConfigDocumentation: v.ConfigDocumentation, + } + } + + ret.Resources[i] = DescribeConfigResponseResource{ + ResourceType: t.ResourceType, + ResourceName: t.ResourceName, + Error: makeError(t.ErrorCode, t.ErrorMessage), + ConfigEntries: configEntries, + } + } + + return ret, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/describegroups.go b/vendor/github.com/segmentio/kafka-go/describegroups.go new file mode 100644 index 00000000000..4faf7a01bec --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/describegroups.go @@ -0,0 +1,298 @@ +package kafka + +import ( + "bufio" + "bytes" + "context" + "fmt" + "net" + + "github.com/segmentio/kafka-go/protocol/describegroups" +) + +// DescribeGroupsRequest is a request to the DescribeGroups API. +type DescribeGroupsRequest struct { + // Addr is the address of the kafka broker to send the request to. + Addr net.Addr + + // GroupIDs is a slice of groups to get details for.
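// [Editorial aside; not part of the vendored source] A usage sketch for the
// Client.DescribeConfigs method above, assuming the package's
// ResourceTypeTopic constant and a hypothetical topic and config key:
//
//	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
//	resp, err := client.DescribeConfigs(context.Background(), &kafka.DescribeConfigsRequest{
//		Resources: []kafka.DescribeConfigRequestResource{{
//			ResourceType: kafka.ResourceTypeTopic,
//			ResourceName: "example-topic",
//			ConfigNames:  []string{"retention.ms"},
//		}},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, res := range resp.Resources {
//		for _, entry := range res.ConfigEntries {
//			log.Printf("%s = %s", entry.ConfigName, entry.ConfigValue)
//		}
//	}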
+ GroupIDs []string +} + +// DescribeGroupsResponse is a response from the DescribeGroups API. +type DescribeGroupsResponse struct { + // Groups is a slice of details for the requested groups. + Groups []DescribeGroupsResponseGroup +} + +// DescribeGroupsResponseGroup contains the response details for a single group. +type DescribeGroupsResponseGroup struct { + // Error is set to a non-nil value if there was an error fetching the details + // for this group. + Error error + + // GroupID is the ID of the group. + GroupID string + + // GroupState is a description of the group state. + GroupState string + + // Members contains details about each member of the group. + Members []DescribeGroupsResponseMember +} + +// DescribeGroupsResponseMember represents the membership information for a single group member. +type DescribeGroupsResponseMember struct { + // MemberID is the ID of the group member. + MemberID string + + // ClientID is the ID of the client that the group member is using. + ClientID string + + // ClientHost is the host of the client that the group member is connecting from. + ClientHost string + + // MemberMetadata contains metadata about this group member. + MemberMetadata DescribeGroupsResponseMemberMetadata + + // MemberAssignments contains the topic partitions that this member is assigned to. + MemberAssignments DescribeGroupsResponseAssignments +} + +// DescribeGroupsResponseMemberMetadata stores metadata associated with a group member. +type DescribeGroupsResponseMemberMetadata struct { + // Version is the version of the metadata. + Version int + + // Topics is the list of topics that the member is assigned to. + Topics []string + + // UserData is the user data for the member. + UserData []byte + + // OwnedPartitions contains the partitions owned by this group member; only set if + // consumers are using a cooperative rebalancing assignor protocol. + OwnedPartitions []DescribeGroupsResponseMemberMetadataOwnedPartition +} + +type DescribeGroupsResponseMemberMetadataOwnedPartition struct { + // Topic is the name of the topic. + Topic string + + // Partitions is the partitions that are owned by the group in the topic. + Partitions []int +} + +// DescribeGroupsResponseAssignments stores the topic partition assignment data for a group member. +type DescribeGroupsResponseAssignments struct { + // Version is the version of the assignments data. + Version int + + // Topics contains the details of the partition assignments for each topic. + Topics []GroupMemberTopic + + // UserData is the user data for the member. + UserData []byte +} + +// GroupMemberTopic is a mapping from a topic to a list of partitions in the topic. It is used +// to represent the topic partitions that have been assigned to a group member. +type GroupMemberTopic struct { + // Topic is the name of the topic. + Topic string + + // Partitions is a slice of partition IDs that this member is assigned to in the topic. + Partitions []int +} + +// DescribeGroups calls the Kafka DescribeGroups API to get information about one or more +// consumer groups. See https://kafka.apache.org/protocol#The_Messages_DescribeGroups for +// more information.
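// [Editorial aside; not part of the vendored source] A usage sketch for the
// DescribeGroups client API that follows; the broker address and group id are
// hypothetical examples:
//
//	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
//	resp, err := client.DescribeGroups(context.Background(), &kafka.DescribeGroupsRequest{
//		GroupIDs: []string{"example-consumer-group"},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, g := range resp.Groups {
//		log.Printf("group %s is %s with %d members", g.GroupID, g.GroupState, len(g.Members))
//	}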
+func (c *Client) DescribeGroups( + ctx context.Context, + req *DescribeGroupsRequest, +) (*DescribeGroupsResponse, error) { + protoResp, err := c.roundTrip( + ctx, + req.Addr, + &describegroups.Request{ + Groups: req.GroupIDs, + }, + ) + if err != nil { + return nil, err + } + apiResp := protoResp.(*describegroups.Response) + resp := &DescribeGroupsResponse{} + + for _, apiGroup := range apiResp.Groups { + group := DescribeGroupsResponseGroup{ + Error: makeError(apiGroup.ErrorCode, ""), + GroupID: apiGroup.GroupID, + GroupState: apiGroup.GroupState, + } + + for _, member := range apiGroup.Members { + decodedMetadata, err := decodeMemberMetadata(member.MemberMetadata) + if err != nil { + return nil, err + } + decodedAssignments, err := decodeMemberAssignments(member.MemberAssignment) + if err != nil { + return nil, err + } + + group.Members = append(group.Members, DescribeGroupsResponseMember{ + MemberID: member.MemberID, + ClientID: member.ClientID, + ClientHost: member.ClientHost, + MemberAssignments: decodedAssignments, + MemberMetadata: decodedMetadata, + }) + } + resp.Groups = append(resp.Groups, group) + } + + return resp, nil +} + +// decodeMemberMetadata converts raw metadata bytes to a +// DescribeGroupsResponseMemberMetadata struct. +// +// See https://github.com/apache/kafka/blob/2.4/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerProtocol.java#L49 +// for protocol details. +func decodeMemberMetadata(rawMetadata []byte) (DescribeGroupsResponseMemberMetadata, error) { + mm := DescribeGroupsResponseMemberMetadata{} + + if len(rawMetadata) == 0 { + return mm, nil + } + + buf := bytes.NewBuffer(rawMetadata) + bufReader := bufio.NewReader(buf) + remain := len(rawMetadata) + + var err error + var version16 int16 + + if remain, err = readInt16(bufReader, remain, &version16); err != nil { + return mm, err + } + mm.Version = int(version16) + + if remain, err = readStringArray(bufReader, remain, &mm.Topics); err != nil { + return mm, err + } + if remain, err = readBytes(bufReader, remain, &mm.UserData); err != nil { + return mm, err + } + + if mm.Version == 1 && remain > 0 { + fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { + op := DescribeGroupsResponseMemberMetadataOwnedPartition{} + if fnRemain, fnErr = readString(r, size, &op.Topic); fnErr != nil { + return + } + + ps := []int32{} + if fnRemain, fnErr = readInt32Array(r, fnRemain, &ps); fnErr != nil { + return + } + + for _, p := range ps { + op.Partitions = append(op.Partitions, int(p)) + } + + mm.OwnedPartitions = append(mm.OwnedPartitions, op) + return + } + + if remain, err = readArrayWith(bufReader, remain, fn); err != nil { + return mm, err + } + } + + if remain != 0 { + return mm, fmt.Errorf("Got non-zero number of bytes remaining: %d", remain) + } + + return mm, nil +} + +// decodeMemberAssignments converts raw assignment bytes to a DescribeGroupsResponseAssignments +// struct. +// +// See https://github.com/apache/kafka/blob/2.4/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerProtocol.java#L49 +// for protocol details. 
+func decodeMemberAssignments(rawAssignments []byte) (DescribeGroupsResponseAssignments, error) { + ma := DescribeGroupsResponseAssignments{} + + if len(rawAssignments) == 0 { + return ma, nil + } + + buf := bytes.NewBuffer(rawAssignments) + bufReader := bufio.NewReader(buf) + remain := len(rawAssignments) + + var err error + var version16 int16 + + if remain, err = readInt16(bufReader, remain, &version16); err != nil { + return ma, err + } + ma.Version = int(version16) + + fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { + item := GroupMemberTopic{} + + if fnRemain, fnErr = readString(r, size, &item.Topic); fnErr != nil { + return + } + + partitions := []int32{} + + if fnRemain, fnErr = readInt32Array(r, fnRemain, &partitions); fnErr != nil { + return + } + for _, partition := range partitions { + item.Partitions = append(item.Partitions, int(partition)) + } + + ma.Topics = append(ma.Topics, item) + return + } + if remain, err = readArrayWith(bufReader, remain, fn); err != nil { + return ma, err + } + + if remain, err = readBytes(bufReader, remain, &ma.UserData); err != nil { + return ma, err + } + + if remain != 0 { + return ma, fmt.Errorf("Got non-zero number of bytes remaining: %d", remain) + } + + return ma, nil +} + +// readInt32Array reads an array of int32s. It's adapted from the implementation of +// readStringArray. +func readInt32Array(r *bufio.Reader, sz int, v *[]int32) (remain int, err error) { + var content []int32 + fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { + var value int32 + if fnRemain, fnErr = readInt32(r, size, &value); fnErr != nil { + return + } + content = append(content, value) + return + } + if remain, err = readArrayWith(r, sz, fn); err != nil { + return + } + + *v = content + return +} diff --git a/vendor/github.com/segmentio/kafka-go/dialer.go b/vendor/github.com/segmentio/kafka-go/dialer.go new file mode 100644 index 00000000000..7786ed3200d --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/dialer.go @@ -0,0 +1,493 @@ +package kafka + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "strconv" + "strings" + "time" + + "github.com/segmentio/kafka-go/sasl" +) + +// The Dialer type mirrors the net.Dialer API but is designed to open kafka +// connections instead of raw network connections. +type Dialer struct { + // Unique identifier for client connections established by this Dialer. + ClientID string + + // Optionally specifies the function that the dialer uses to establish + // network connections. If nil, net.(*Dialer).DialContext is used instead. + // + // When DialFunc is set, LocalAddr, DualStack, FallbackDelay, and KeepAlive + // are ignored. + DialFunc func(ctx context.Context, network string, address string) (net.Conn, error) + + // Timeout is the maximum amount of time a dial will wait for a connect to + // complete. If Deadline is also set, it may fail earlier. + // + // The default is no timeout. + // + // When dialing a name with multiple IP addresses, the timeout may be + // divided between them. + // + // With or without a timeout, the operating system may impose its own + // earlier timeout. For instance, TCP timeouts are often around 3 minutes. + Timeout time.Duration + + // Deadline is the absolute point in time after which dials will fail. + // If Timeout is set, it may fail earlier. + // Zero means no deadline, or dependent on the operating system as with the + // Timeout option. 
+ Deadline time.Time + + // LocalAddr is the local address to use when dialing an address. + // The address must be of a compatible type for the network being dialed. + // If nil, a local address is automatically chosen. + LocalAddr net.Addr + + // DualStack enables RFC 6555-compliant "Happy Eyeballs" dialing when the + // network is "tcp" and the destination is a host name with both IPv4 and + // IPv6 addresses. This allows a client to tolerate networks where one + // address family is silently broken. + DualStack bool + + // FallbackDelay specifies the length of time to wait before spawning a + // fallback connection, when DualStack is enabled. + // If zero, a default delay of 300ms is used. + FallbackDelay time.Duration + + // KeepAlive specifies the keep-alive period for an active network + // connection. + // If zero, keep-alives are not enabled. Network protocols that do not + // support keep-alives ignore this field. + KeepAlive time.Duration + + // Resolver optionally gives a hook to convert the broker address into an + // alternate host or IP address which is useful for custom service discovery. + // If a custom resolver returns any possible hosts, the first one will be + // used and the original discarded. If a port number is included with the + // resolved host, it will only be used if a port number was not previously + // specified. If no port is specified or resolved, the default of 9092 will be + // used. + Resolver Resolver + + // TLS enables Dialer to open secure connections. If nil, standard net.Conn + // will be used. + TLS *tls.Config + + // SASLMechanism configures the Dialer to use SASL authentication. If nil, + // no authentication will be performed. + SASLMechanism sasl.Mechanism + + // The transactional id to use for transactional delivery. Idempotent + // delivery should be enabled if transactional id is configured. + // For more details look at transactional.id description here: http://kafka.apache.org/documentation.html#producerconfigs + // Empty string means that the connection will be non-transactional. + TransactionalID string +} + +// Dial connects to the address on the named network. +func (d *Dialer) Dial(network string, address string) (*Conn, error) { + return d.DialContext(context.Background(), network, address) +} + +// DialContext connects to the address on the named network using the provided +// context. +// +// The provided Context must be non-nil. If the context expires before the +// connection is complete, an error is returned. Once successfully connected, +// any expiration of the context will not affect the connection. +// +// When using TCP, and the host in the address parameter resolves to multiple +// network addresses, any dial timeout (from d.Timeout or ctx) is spread over +// each consecutive dial, such that each is given an appropriate fraction of the +// time to connect. For example, if a host has 4 IP addresses and the timeout is +// 1 minute, the connect to each single address will be given 15 seconds to +// complete before trying the next one. +func (d *Dialer) DialContext(ctx context.Context, network string, address string) (*Conn, error) { + return d.connect( + ctx, + network, + address, + ConnConfig{ + ClientID: d.ClientID, + TransactionalID: d.TransactionalID, + }, + ) +} + +// DialLeader opens a connection to the leader of the partition for a given +// topic.
+// +// The address given to the DialContext method may not be the one that the +// connection will end up being established to, because the dialer will look up +// the partition leader for the topic and return a connection to that server. +// The original address is only used as a mechanism to discover the +// configuration of the kafka cluster that we're connecting to. +func (d *Dialer) DialLeader(ctx context.Context, network string, address string, topic string, partition int) (*Conn, error) { + p, err := d.LookupPartition(ctx, network, address, topic, partition) + if err != nil { + return nil, err + } + return d.DialPartition(ctx, network, address, p) +} + +// DialPartition opens a connection to the leader of the partition specified by the partition +// descriptor. It's strongly advised to use the descriptor of the partition that comes out of +// the LookupPartition or LookupPartitions functions. +func (d *Dialer) DialPartition(ctx context.Context, network string, address string, partition Partition) (*Conn, error) { + return d.connect(ctx, network, net.JoinHostPort(partition.Leader.Host, strconv.Itoa(partition.Leader.Port)), ConnConfig{ + ClientID: d.ClientID, + Topic: partition.Topic, + Partition: partition.ID, + Broker: partition.Leader.ID, + Rack: partition.Leader.Rack, + TransactionalID: d.TransactionalID, + }) +} + +// LookupLeader searches for the kafka broker that is the leader of the +// partition for a given topic, returning a Broker value representing it. +func (d *Dialer) LookupLeader(ctx context.Context, network string, address string, topic string, partition int) (Broker, error) { + p, err := d.LookupPartition(ctx, network, address, topic, partition) + return p.Leader, err +} + +// LookupPartition searches for the description of the specified partition id. +func (d *Dialer) LookupPartition(ctx context.Context, network string, address string, topic string, partition int) (Partition, error) { + c, err := d.DialContext(ctx, network, address) + if err != nil { + return Partition{}, err + } + defer c.Close() + + brkch := make(chan Partition, 1) + errch := make(chan error, 1) + + go func() { + for attempt := 0; true; attempt++ { + if attempt != 0 { + if !sleep(ctx, backoff(attempt, 100*time.Millisecond, 10*time.Second)) { + errch <- ctx.Err() + return + } + } + + partitions, err := c.ReadPartitions(topic) + if err != nil { + if isTemporary(err) { + continue + } + errch <- err + return + } + + for _, p := range partitions { + if p.ID == partition { + brkch <- p + return + } + } + } + + errch <- UnknownTopicOrPartition + }() + + var prt Partition + select { + case prt = <-brkch: + case err = <-errch: + case <-ctx.Done(): + err = ctx.Err() + } + return prt, err +} + +// LookupPartitions returns the list of partitions that exist for the given topic. +func (d *Dialer) LookupPartitions(ctx context.Context, network string, address string, topic string) ([]Partition, error) { + conn, err := d.DialContext(ctx, network, address) + if err != nil { + return nil, err + } + defer conn.Close() + + prtch := make(chan []Partition, 1) + errch := make(chan error, 1) + + go func() { + if prt, err := conn.ReadPartitions(topic); err != nil { + errch <- err + } else { + prtch <- prt + } + }() + + var prt []Partition + select { + case prt = <-prtch: + case err = <-errch: + case <-ctx.Done(): + err = ctx.Err() + } + return prt, err +} + +// connectTLS returns a tls.Conn that has already completed the Handshake.
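// [Editorial aside; not part of the vendored source] A usage sketch for the
// DialLeader helper above, producing one message on partition 0 of a
// hypothetical topic. WriteMessages and the kafka.Message type are Conn
// helpers defined elsewhere in this package (not shown in this diff):
//
//	conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", "example-topic", 0)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer conn.Close()
//
//	conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
//	if _, err := conn.WriteMessages(kafka.Message{Value: []byte("hello")}); err != nil {
//		log.Fatal(err)
//	}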
+func (d *Dialer) connectTLS(ctx context.Context, conn net.Conn, config *tls.Config) (tlsConn *tls.Conn, err error) { + tlsConn = tls.Client(conn, config) + errch := make(chan error) + + go func() { + defer close(errch) + errch <- tlsConn.Handshake() + }() + + select { + case <-ctx.Done(): + conn.Close() + tlsConn.Close() + <-errch // ignore possible error from Handshake + err = ctx.Err() + + case err = <-errch: + } + + return +} + +// connect opens a socket connection to the broker, wraps it to create a +// kafka connection, and performs SASL authentication if configured to do so. +func (d *Dialer) connect(ctx context.Context, network, address string, connCfg ConnConfig) (*Conn, error) { + if d.Timeout != 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, d.Timeout) + defer cancel() + } + + if !d.Deadline.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, d.Deadline) + defer cancel() + } + + c, err := d.dialContext(ctx, network, address) + if err != nil { + return nil, fmt.Errorf("failed to dial: %w", err) + } + + conn := NewConnWith(c, connCfg) + + if d.SASLMechanism != nil { + host, port, err := splitHostPortNumber(address) + if err != nil { + return nil, fmt.Errorf("could not determine host/port for SASL authentication: %w", err) + } + metadata := &sasl.Metadata{ + Host: host, + Port: port, + } + if err := d.authenticateSASL(sasl.WithMetadata(ctx, metadata), conn); err != nil { + _ = conn.Close() + return nil, fmt.Errorf("could not successfully authenticate to %s:%d with SASL: %w", host, port, err) + } + } + + return conn, nil +} + +// authenticateSASL performs all of the required requests to authenticate this +// connection. If any step fails, this function returns with an error. A nil +// error indicates successful authentication. +// +// In case of error, this function *does not* close the connection. That is the +// responsibility of the caller. +func (d *Dialer) authenticateSASL(ctx context.Context, conn *Conn) error { + if err := conn.saslHandshake(d.SASLMechanism.Name()); err != nil { + return fmt.Errorf("SASL handshake failed: %w", err) + } + + sess, state, err := d.SASLMechanism.Start(ctx) + if err != nil { + return fmt.Errorf("SASL authentication process could not be started: %w", err) + } + + for completed := false; !completed; { + challenge, err := conn.saslAuthenticate(state) + switch { + case err == nil: + case errors.Is(err, io.EOF): + // the broker may communicate a failed exchange by closing the + // connection (esp. in the case where we're passing opaque sasl + // data over the wire since there's no protocol info). 
+ return SASLAuthenticationFailed + default: + return err + } + + completed, state, err = sess.Next(ctx, challenge) + if err != nil { + return fmt.Errorf("SASL authentication process has failed: %w", err) + } + } + + return nil +} + +func (d *Dialer) dialContext(ctx context.Context, network string, addr string) (net.Conn, error) { + address, err := lookupHost(ctx, addr, d.Resolver) + if err != nil { + return nil, fmt.Errorf("failed to resolve host: %w", err) + } + + dial := d.DialFunc + if dial == nil { + dial = (&net.Dialer{ + LocalAddr: d.LocalAddr, + DualStack: d.DualStack, + FallbackDelay: d.FallbackDelay, + KeepAlive: d.KeepAlive, + }).DialContext + } + + conn, err := dial(ctx, network, address) + if err != nil { + return nil, fmt.Errorf("failed to open connection to %s: %w", address, err) + } + + if d.TLS != nil { + c := d.TLS + // If no ServerName is set, infer the ServerName + // from the hostname we're connecting to. + if c.ServerName == "" { + c = d.TLS.Clone() + // Copied from tls.go in the standard library. + colonPos := strings.LastIndex(address, ":") + if colonPos == -1 { + colonPos = len(address) + } + hostname := address[:colonPos] + c.ServerName = hostname + } + return d.connectTLS(ctx, conn, c) + } + + return conn, nil +} + +// DefaultDialer is the default dialer used when none is specified. +var DefaultDialer = &Dialer{ + Timeout: 10 * time.Second, + DualStack: true, +} + +// Dial is a convenience wrapper for DefaultDialer.Dial. +func Dial(network string, address string) (*Conn, error) { + return DefaultDialer.Dial(network, address) +} + +// DialContext is a convenience wrapper for DefaultDialer.DialContext. +func DialContext(ctx context.Context, network string, address string) (*Conn, error) { + return DefaultDialer.DialContext(ctx, network, address) +} + +// DialLeader is a convenience wrapper for DefaultDialer.DialLeader. +func DialLeader(ctx context.Context, network string, address string, topic string, partition int) (*Conn, error) { + return DefaultDialer.DialLeader(ctx, network, address, topic, partition) +} + +// DialPartition is a convenience wrapper for DefaultDialer.DialPartition. +func DialPartition(ctx context.Context, network string, address string, partition Partition) (*Conn, error) { + return DefaultDialer.DialPartition(ctx, network, address, partition) +} + +// LookupPartition is a convenience wrapper for DefaultDialer.LookupPartition. +func LookupPartition(ctx context.Context, network string, address string, topic string, partition int) (Partition, error) { + return DefaultDialer.LookupPartition(ctx, network, address, topic, partition) +} + +// LookupPartitions is a convenience wrapper for DefaultDialer.LookupPartitions. 
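// [Editorial aside; not part of the vendored source] A sketch of a Dialer
// configured for SASL/PLAIN, mirroring DefaultDialer above. The
// plain.Mechanism type is assumed to come from this module's sasl/plain
// subpackage; the credentials and SASL listener port match the docker-compose
// files later in this diff, and the TLS field is shown only for listeners
// that actually use TLS:
//
//	dialer := &kafka.Dialer{
//		Timeout:       10 * time.Second,
//		DualStack:     true,
//		SASLMechanism: plain.Mechanism{Username: "adminplain", Password: "admin-secret"},
//		// TLS: &tls.Config{}, // enable when the listener is TLS; ServerName is inferred from the address
//	}
//	conn, err := dialer.DialContext(context.Background(), "tcp", "localhost:9093")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer conn.Close()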
+func LookupPartitions(ctx context.Context, network string, address string, topic string) ([]Partition, error) { + return DefaultDialer.LookupPartitions(ctx, network, address, topic) +} + +func sleep(ctx context.Context, duration time.Duration) bool { + if duration == 0 { + select { + default: + return true + case <-ctx.Done(): + return false + } + } + timer := time.NewTimer(duration) + defer timer.Stop() + select { + case <-timer.C: + return true + case <-ctx.Done(): + return false + } +} + +func backoff(attempt int, min time.Duration, max time.Duration) time.Duration { + d := time.Duration(attempt*attempt) * min + if d > max { + d = max + } + return d +} + +func canonicalAddress(s string) string { + return net.JoinHostPort(splitHostPort(s)) +} + +func splitHostPort(s string) (host string, port string) { + host, port, _ = net.SplitHostPort(s) + if len(host) == 0 && len(port) == 0 { + host = s + port = "9092" + } + return +} + +func splitHostPortNumber(s string) (host string, portNumber int, err error) { + host, port := splitHostPort(s) + portNumber, err = strconv.Atoi(port) + if err != nil { + return host, 0, fmt.Errorf("%s: %w", s, err) + } + return host, portNumber, nil +} + +func lookupHost(ctx context.Context, address string, resolver Resolver) (string, error) { + host, port := splitHostPort(address) + + if resolver != nil { + resolved, err := resolver.LookupHost(ctx, host) + if err != nil { + return "", fmt.Errorf("failed to resolve host %s: %w", host, err) + } + + // if the resolver doesn't return anything, we'll fall back on the provided + // address instead + if len(resolved) > 0 { + resolvedHost, resolvedPort := splitHostPort(resolved[0]) + + // we'll always prefer the resolved host + host = resolvedHost + + // in the case of port though, the provided address takes priority, and we + // only use the resolved address to set the port when not specified + if port == "" { + port = resolvedPort + } + } + } + + return net.JoinHostPort(host, port), nil +} diff --git a/vendor/github.com/segmentio/kafka-go/discard.go b/vendor/github.com/segmentio/kafka-go/discard.go new file mode 100644 index 00000000000..0cb1be9d066 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/discard.go @@ -0,0 +1,38 @@ +package kafka + +import "bufio" + +func discardN(r *bufio.Reader, sz int, n int) (int, error) { + var err error + if n <= sz { + n, err = r.Discard(n) + } else { + n, err = r.Discard(sz) + if err == nil { + err = errShortRead + } + } + return sz - n, err +} + +func discardInt32(r *bufio.Reader, sz int) (int, error) { + return discardN(r, sz, 4) +} + +func discardString(r *bufio.Reader, sz int) (int, error) { + return readStringWith(r, sz, func(r *bufio.Reader, sz int, n int) (int, error) { + if n < 0 { + return sz, nil + } + return discardN(r, sz, n) + }) +} + +func discardBytes(r *bufio.Reader, sz int) (int, error) { + return readBytesWith(r, sz, func(r *bufio.Reader, sz int, n int) (int, error) { + if n < 0 { + return sz, nil + } + return discardN(r, sz, n) + }) +} diff --git a/vendor/github.com/segmentio/kafka-go/docker-compose-241.yml b/vendor/github.com/segmentio/kafka-go/docker-compose-241.yml new file mode 100644 index 00000000000..6feb1844bac --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/docker-compose-241.yml @@ -0,0 +1,32 @@ +version: "3" +services: + kafka: + image: wurstmeister/kafka:2.12-2.4.1 + restart: on-failure:3 + links: + - zookeeper + ports: + - 9092:9092 + - 9093:9093 + environment: + KAFKA_VERSION: '2.4.1' 
+ KAFKA_BROKER_ID: '1' + KAFKA_CREATE_TOPICS: 'test-writer-0:3:1,test-writer-1:3:1' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_ADVERTISED_HOST_NAME: 'localhost' + KAFKA_ADVERTISED_PORT: '9092' + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' + KAFKA_MESSAGE_MAX_BYTES: '200000000' + KAFKA_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093' + KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093' + KAFKA_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512' + KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf" + CUSTOM_INIT_SCRIPT: |- + echo -e 'KafkaServer {\norg.apache.kafka.common.security.scram.ScramLoginModule required\n username="adminscram"\n password="admin-secret";\n org.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n };' > /opt/kafka/config/kafka_server_jaas.conf; + /opt/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=admin-secret-256],SCRAM-SHA-512=[password=admin-secret-512]' --entity-type users --entity-name adminscram + + zookeeper: + image: wurstmeister/zookeeper + ports: + - 2181:2181 diff --git a/vendor/github.com/segmentio/kafka-go/docker-compose.010.yml b/vendor/github.com/segmentio/kafka-go/docker-compose.010.yml new file mode 100644 index 00000000000..56123f85cc0 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/docker-compose.010.yml @@ -0,0 +1,29 @@ +version: "3" +services: + kafka: + image: wurstmeister/kafka:0.10.1.1 + links: + - zookeeper + ports: + - 9092:9092 + - 9093:9093 + environment: + KAFKA_BROKER_ID: '1' + KAFKA_CREATE_TOPICS: 'test-writer-0:3:1,test-writer-1:3:1' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_ADVERTISED_HOST_NAME: 'localhost' + KAFKA_ADVERTISED_PORT: '9092' + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' + KAFKA_MESSAGE_MAX_BYTES: '200000000' + KAFKA_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093' + KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093' + KAFKA_SASL_ENABLED_MECHANISMS: 'PLAIN' + KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf" + CUSTOM_INIT_SCRIPT: |- + echo -e 'KafkaServer {\norg.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n };' > /opt/kafka/config/kafka_server_jaas.conf; + + zookeeper: + image: wurstmeister/zookeeper + ports: + - 2181:2181 diff --git a/vendor/github.com/segmentio/kafka-go/docker-compose.yml b/vendor/github.com/segmentio/kafka-go/docker-compose.yml new file mode 100644 index 00000000000..dc0c2e85e89 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/docker-compose.yml @@ -0,0 +1,34 @@ +version: "3" +services: + kafka: + image: wurstmeister/kafka:2.12-2.3.1 + restart: on-failure:3 + links: + - zookeeper + ports: + - 9092:9092 + - 9093:9093 + environment: + KAFKA_VERSION: '2.3.1' + KAFKA_BROKER_ID: '1' + KAFKA_CREATE_TOPICS: 'test-writer-0:3:1,test-writer-1:3:1' + KAFKA_DELETE_TOPIC_ENABLE: 'true' + KAFKA_ADVERTISED_HOST_NAME: 'localhost' + KAFKA_ADVERTISED_PORT: '9092' + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true' + KAFKA_MESSAGE_MAX_BYTES: '200000000' + KAFKA_LISTENERS: 'PLAINTEXT://:9092,SASL_PLAINTEXT://:9093' + 
KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://localhost:9092,SASL_PLAINTEXT://localhost:9093' + KAFKA_SASL_ENABLED_MECHANISMS: 'PLAIN,SCRAM-SHA-256,SCRAM-SHA-512' + KAFKA_AUTHORIZER_CLASS_NAME: 'kafka.security.auth.SimpleAclAuthorizer' + KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true' + KAFKA_OPTS: "-Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf" + CUSTOM_INIT_SCRIPT: |- + echo -e 'KafkaServer {\norg.apache.kafka.common.security.scram.ScramLoginModule required\n username="adminscram"\n password="admin-secret";\n org.apache.kafka.common.security.plain.PlainLoginModule required\n username="adminplain"\n password="admin-secret"\n user_adminplain="admin-secret";\n };' > /opt/kafka/config/kafka_server_jaas.conf; + /opt/kafka/bin/kafka-configs.sh --zookeeper zookeeper:2181 --alter --add-config 'SCRAM-SHA-256=[password=admin-secret-256],SCRAM-SHA-512=[password=admin-secret-512]' --entity-type users --entity-name adminscram + + zookeeper: + image: wurstmeister/zookeeper + ports: + - 2181:2181 diff --git a/vendor/github.com/segmentio/kafka-go/electleaders.go b/vendor/github.com/segmentio/kafka-go/electleaders.go new file mode 100644 index 00000000000..2dd63b73db7 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/electleaders.go @@ -0,0 +1,89 @@ +package kafka + +import ( + "context" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/electleaders" +) + +// ElectLeadersRequest is a request to the ElectLeaders API. +type ElectLeadersRequest struct { + // Addr is the address of the kafka broker to send the request to. + Addr net.Addr + + // Topic is the name of the topic to do the leader elections in. + Topic string + + // Partitions is the list of partitions to run leader elections for. + Partitions []int + + // Timeout is the amount of time to wait for the election to run. + Timeout time.Duration +} + +// ElectLeadersResponse is a response from the ElectLeaders API. +type ElectLeadersResponse struct { + // Error is set to a non-nil value if a top-level error occurred. + Error error + + // PartitionResults contains the results for each partition leader election. + PartitionResults []ElectLeadersResponsePartitionResult +} + +// ElectLeadersResponsePartitionResult contains the response details for a single partition. +type ElectLeadersResponsePartitionResult struct { + // Partition is the ID of the partition. + Partition int + + // Error is set to a non-nil value if an error occurred electing leaders + // for this partition.
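// [Editorial aside; not part of the vendored source] A usage sketch for the
// ElectLeaders request/response types above and the Client.ElectLeaders
// method that follows; the broker address, topic, and partitions are
// hypothetical:
//
//	client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}
//	resp, err := client.ElectLeaders(context.Background(), &kafka.ElectLeadersRequest{
//		Topic:      "example-topic",
//		Partitions: []int{0, 1, 2},
//		Timeout:    10 * time.Second,
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	if resp.Error != nil {
//		log.Printf("top-level error: %v", resp.Error)
//	}
//	for _, pr := range resp.PartitionResults {
//		if pr.Error != nil {
//			log.Printf("partition %d: %v", pr.Partition, pr.Error)
//		}
//	}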
+ Error error +} + +func (c *Client) ElectLeaders( + ctx context.Context, + req *ElectLeadersRequest, +) (*ElectLeadersResponse, error) { + partitions32 := []int32{} + for _, partition := range req.Partitions { + partitions32 = append(partitions32, int32(partition)) + } + + protoResp, err := c.roundTrip( + ctx, + req.Addr, + &electleaders.Request{ + TopicPartitions: []electleaders.RequestTopicPartitions{ + { + Topic: req.Topic, + PartitionIDs: partitions32, + }, + }, + TimeoutMs: int32(req.Timeout.Milliseconds()), + }, + ) + if err != nil { + return nil, err + } + apiResp := protoResp.(*electleaders.Response) + + resp := &ElectLeadersResponse{ + Error: makeError(apiResp.ErrorCode, ""), + } + + for _, topicResult := range apiResp.ReplicaElectionResults { + for _, partitionResult := range topicResult.PartitionResults { + resp.PartitionResults = append( + resp.PartitionResults, + ElectLeadersResponsePartitionResult{ + Partition: int(partitionResult.PartitionID), + Error: makeError(partitionResult.ErrorCode, partitionResult.ErrorMessage), + }, + ) + } + } + + return resp, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/endtxn.go b/vendor/github.com/segmentio/kafka-go/endtxn.go new file mode 100644 index 00000000000..ebfeab2eee3 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/endtxn.go @@ -0,0 +1,61 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/endtxn" +) + +// EndTxnRequest represents a request sent to a kafka broker to end a transaction. +type EndTxnRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // The transactional id key. + TransactionalID string + + // The Producer ID (PID) for the current producer session. + ProducerID int + + // The epoch associated with the current producer session for the given PID. + ProducerEpoch int + + // Committed should be set to true if the transaction was committed, false otherwise. + Committed bool +} + +// EndTxnResponse represents a response from a kafka broker to an end transaction request. +type EndTxnResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // Error is non-nil if an error occurred and contains the kafka error code. + // Programs may use the standard errors.Is function to test the error + // against kafka error codes. + Error error +} + +// EndTxn sends an EndTxn request to a kafka broker and returns its response. +func (c *Client) EndTxn(ctx context.Context, req *EndTxnRequest) (*EndTxnResponse, error) { + m, err := c.roundTrip(ctx, req.Addr, &endtxn.Request{ + TransactionalID: req.TransactionalID, + ProducerID: int64(req.ProducerID), + ProducerEpoch: int16(req.ProducerEpoch), + Committed: req.Committed, + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).EndTxn: %w", err) + } + + r := m.(*endtxn.Response) + + res := &EndTxnResponse{ + Throttle: makeDuration(r.ThrottleTimeMs), + Error: makeError(r.ErrorCode, ""), + } + + return res, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/error.go b/vendor/github.com/segmentio/kafka-go/error.go new file mode 100644 index 00000000000..4a7a8a278a0 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/error.go @@ -0,0 +1,712 @@ +package kafka + +import ( + "errors" + "fmt" + "io" + "syscall" +) + +// Error represents the different error codes that may be returned by kafka.
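// [Editorial aside; not part of the vendored source] A sketch of how callers
// can test returned errors against the codes defined below with the standard
// errors package, as the response docs earlier in this diff suggest;
// doKafkaCall is a hypothetical helper standing in for any client call:
//
//	if err := doKafkaCall(); err != nil {
//		if errors.Is(err, kafka.TopicAlreadyExists) {
//			// benign: the topic was already there
//		}
//		var kerr kafka.Error
//		if errors.As(err, &kerr) && kerr.Temporary() {
//			// retriable per the Kafka protocol error table; safe to retry later
//		}
//	}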
+// https://kafka.apache.org/protocol#protocol_error_codes +type Error int + +const ( + Unknown Error = -1 + OffsetOutOfRange Error = 1 + InvalidMessage Error = 2 + UnknownTopicOrPartition Error = 3 + InvalidMessageSize Error = 4 + LeaderNotAvailable Error = 5 + NotLeaderForPartition Error = 6 + RequestTimedOut Error = 7 + BrokerNotAvailable Error = 8 + ReplicaNotAvailable Error = 9 + MessageSizeTooLarge Error = 10 + StaleControllerEpoch Error = 11 + OffsetMetadataTooLarge Error = 12 + NetworkException Error = 13 + GroupLoadInProgress Error = 14 + GroupCoordinatorNotAvailable Error = 15 + NotCoordinatorForGroup Error = 16 + InvalidTopic Error = 17 + RecordListTooLarge Error = 18 + NotEnoughReplicas Error = 19 + NotEnoughReplicasAfterAppend Error = 20 + InvalidRequiredAcks Error = 21 + IllegalGeneration Error = 22 + InconsistentGroupProtocol Error = 23 + InvalidGroupId Error = 24 + UnknownMemberId Error = 25 + InvalidSessionTimeout Error = 26 + RebalanceInProgress Error = 27 + InvalidCommitOffsetSize Error = 28 + TopicAuthorizationFailed Error = 29 + GroupAuthorizationFailed Error = 30 + ClusterAuthorizationFailed Error = 31 + InvalidTimestamp Error = 32 + UnsupportedSASLMechanism Error = 33 + IllegalSASLState Error = 34 + UnsupportedVersion Error = 35 + TopicAlreadyExists Error = 36 + InvalidPartitionNumber Error = 37 + InvalidReplicationFactor Error = 38 + InvalidReplicaAssignment Error = 39 + InvalidConfiguration Error = 40 + NotController Error = 41 + InvalidRequest Error = 42 + UnsupportedForMessageFormat Error = 43 + PolicyViolation Error = 44 + OutOfOrderSequenceNumber Error = 45 + DuplicateSequenceNumber Error = 46 + InvalidProducerEpoch Error = 47 + InvalidTransactionState Error = 48 + InvalidProducerIDMapping Error = 49 + InvalidTransactionTimeout Error = 50 + ConcurrentTransactions Error = 51 + TransactionCoordinatorFenced Error = 52 + TransactionalIDAuthorizationFailed Error = 53 + SecurityDisabled Error = 54 + BrokerAuthorizationFailed Error = 55 + KafkaStorageError Error = 56 + LogDirNotFound Error = 57 + SASLAuthenticationFailed Error = 58 + UnknownProducerId Error = 59 + ReassignmentInProgress Error = 60 + DelegationTokenAuthDisabled Error = 61 + DelegationTokenNotFound Error = 62 + DelegationTokenOwnerMismatch Error = 63 + DelegationTokenRequestNotAllowed Error = 64 + DelegationTokenAuthorizationFailed Error = 65 + DelegationTokenExpired Error = 66 + InvalidPrincipalType Error = 67 + NonEmptyGroup Error = 68 + GroupIdNotFound Error = 69 + FetchSessionIDNotFound Error = 70 + InvalidFetchSessionEpoch Error = 71 + ListenerNotFound Error = 72 + TopicDeletionDisabled Error = 73 + FencedLeaderEpoch Error = 74 + UnknownLeaderEpoch Error = 75 + UnsupportedCompressionType Error = 76 + StaleBrokerEpoch Error = 77 + OffsetNotAvailable Error = 78 + MemberIDRequired Error = 79 + PreferredLeaderNotAvailable Error = 80 + GroupMaxSizeReached Error = 81 + FencedInstanceID Error = 82 + EligibleLeadersNotAvailable Error = 83 + ElectionNotNeeded Error = 84 + NoReassignmentInProgress Error = 85 + GroupSubscribedToTopic Error = 86 + InvalidRecord Error = 87 + UnstableOffsetCommit Error = 88 + ThrottlingQuotaExceeded Error = 89 + ProducerFenced Error = 90 + ResourceNotFound Error = 91 + DuplicateResource Error = 92 + UnacceptableCredential Error = 93 + InconsistentVoterSet Error = 94 + InvalidUpdateVersion Error = 95 + FeatureUpdateFailed Error = 96 + PrincipalDeserializationFailure Error = 97 + SnapshotNotFound Error = 98 + PositionOutOfRange Error = 99 + UnknownTopicID Error = 100 + 
DuplicateBrokerRegistration Error = 101 + BrokerIDNotRegistered Error = 102 + InconsistentTopicID Error = 103 + InconsistentClusterID Error = 104 + TransactionalIDNotFound Error = 105 + FetchSessionTopicIDError Error = 106 +) + +// Error satisfies the error interface. +func (e Error) Error() string { + return fmt.Sprintf("[%d] %s: %s", e, e.Title(), e.Description()) +} + +// Timeout returns true if the error was due to a timeout. +func (e Error) Timeout() bool { + return e == RequestTimedOut +} + +// Temporary returns true if the operation that generated the error may succeed +// if retried at a later time. +// Kafka error documentation specifies these as "retriable" +// https://kafka.apache.org/protocol#protocol_error_codes +func (e Error) Temporary() bool { + switch e { + case InvalidMessage, + UnknownTopicOrPartition, + LeaderNotAvailable, + NotLeaderForPartition, + RequestTimedOut, + NetworkException, + GroupLoadInProgress, + GroupCoordinatorNotAvailable, + NotCoordinatorForGroup, + NotEnoughReplicas, + NotEnoughReplicasAfterAppend, + NotController, + KafkaStorageError, + FetchSessionIDNotFound, + InvalidFetchSessionEpoch, + ListenerNotFound, + FencedLeaderEpoch, + UnknownLeaderEpoch, + OffsetNotAvailable, + PreferredLeaderNotAvailable, + EligibleLeadersNotAvailable, + ElectionNotNeeded, + NoReassignmentInProgress, + GroupSubscribedToTopic, + UnstableOffsetCommit, + ThrottlingQuotaExceeded, + UnknownTopicID, + InconsistentTopicID, + FetchSessionTopicIDError: + return true + default: + return false + } +} + +// Title returns a human readable title for the error. +func (e Error) Title() string { + switch e { + case Unknown: + return "Unknown" + case OffsetOutOfRange: + return "Offset Out Of Range" + case InvalidMessage: + return "Invalid Message" + case UnknownTopicOrPartition: + return "Unknown Topic Or Partition" + case InvalidMessageSize: + return "Invalid Message Size" + case LeaderNotAvailable: + return "Leader Not Available" + case NotLeaderForPartition: + return "Not Leader For Partition" + case RequestTimedOut: + return "Request Timed Out" + case BrokerNotAvailable: + return "Broker Not Available" + case ReplicaNotAvailable: + return "Replica Not Available" + case MessageSizeTooLarge: + return "Message Size Too Large" + case StaleControllerEpoch: + return "Stale Controller Epoch" + case OffsetMetadataTooLarge: + return "Offset Metadata Too Large" + case GroupLoadInProgress: + return "Group Load In Progress" + case GroupCoordinatorNotAvailable: + return "Group Coordinator Not Available" + case NotCoordinatorForGroup: + return "Not Coordinator For Group" + case InvalidTopic: + return "Invalid Topic" + case RecordListTooLarge: + return "Record List Too Large" + case NotEnoughReplicas: + return "Not Enough Replicas" + case NotEnoughReplicasAfterAppend: + return "Not Enough Replicas After Append" + case InvalidRequiredAcks: + return "Invalid Required Acks" + case IllegalGeneration: + return "Illegal Generation" + case InconsistentGroupProtocol: + return "Inconsistent Group Protocol" + case InvalidGroupId: + return "Invalid Group ID" + case UnknownMemberId: + return "Unknown Member ID" + case InvalidSessionTimeout: + return "Invalid Session Timeout" + case RebalanceInProgress: + return "Rebalance In Progress" + case InvalidCommitOffsetSize: + return "Invalid Commit Offset Size" + case TopicAuthorizationFailed: + return "Topic Authorization Failed" + case GroupAuthorizationFailed: + return "Group Authorization Failed" + case ClusterAuthorizationFailed: + return "Cluster Authorization 
Failed" + case InvalidTimestamp: + return "Invalid Timestamp" + case UnsupportedSASLMechanism: + return "Unsupported SASL Mechanism" + case IllegalSASLState: + return "Illegal SASL State" + case UnsupportedVersion: + return "Unsupported Version" + case TopicAlreadyExists: + return "Topic Already Exists" + case InvalidPartitionNumber: + return "Invalid Partition Number" + case InvalidReplicationFactor: + return "Invalid Replication Factor" + case InvalidReplicaAssignment: + return "Invalid Replica Assignment" + case InvalidConfiguration: + return "Invalid Configuration" + case NotController: + return "Not Controller" + case InvalidRequest: + return "Invalid Request" + case UnsupportedForMessageFormat: + return "Unsupported For Message Format" + case PolicyViolation: + return "Policy Violation" + case OutOfOrderSequenceNumber: + return "Out Of Order Sequence Number" + case DuplicateSequenceNumber: + return "Duplicate Sequence Number" + case InvalidProducerEpoch: + return "Invalid Producer Epoch" + case InvalidTransactionState: + return "Invalid Transaction State" + case InvalidProducerIDMapping: + return "Invalid Producer ID Mapping" + case InvalidTransactionTimeout: + return "Invalid Transaction Timeout" + case ConcurrentTransactions: + return "Concurrent Transactions" + case TransactionCoordinatorFenced: + return "Transaction Coordinator Fenced" + case TransactionalIDAuthorizationFailed: + return "Transactional ID Authorization Failed" + case SecurityDisabled: + return "Security Disabled" + case BrokerAuthorizationFailed: + return "Broker Authorization Failed" + case KafkaStorageError: + return "Kafka Storage Error" + case LogDirNotFound: + return "Log Dir Not Found" + case SASLAuthenticationFailed: + return "SASL Authentication Failed" + case UnknownProducerId: + return "Unknown Producer ID" + case ReassignmentInProgress: + return "Reassignment In Progress" + case DelegationTokenAuthDisabled: + return "Delegation Token Auth Disabled" + case DelegationTokenNotFound: + return "Delegation Token Not Found" + case DelegationTokenOwnerMismatch: + return "Delegation Token Owner Mismatch" + case DelegationTokenRequestNotAllowed: + return "Delegation Token Request Not Allowed" + case DelegationTokenAuthorizationFailed: + return "Delegation Token Authorization Failed" + case DelegationTokenExpired: + return "Delegation Token Expired" + case InvalidPrincipalType: + return "Invalid Principal Type" + case NonEmptyGroup: + return "Non Empty Group" + case GroupIdNotFound: + return "Group ID Not Found" + case FetchSessionIDNotFound: + return "Fetch Session ID Not Found" + case InvalidFetchSessionEpoch: + return "Invalid Fetch Session Epoch" + case ListenerNotFound: + return "Listener Not Found" + case TopicDeletionDisabled: + return "Topic Deletion Disabled" + case FencedLeaderEpoch: + return "Fenced Leader Epoch" + case UnknownLeaderEpoch: + return "Unknown Leader Epoch" + case UnsupportedCompressionType: + return "Unsupported Compression Type" + case MemberIDRequired: + return "Member ID Required" + case EligibleLeadersNotAvailable: + return "Eligible Leader Not Available" + case ElectionNotNeeded: + return "Election Not Needed" + case NoReassignmentInProgress: + return "No Reassignment In Progress" + case GroupSubscribedToTopic: + return "Group Subscribed To Topic" + case InvalidRecord: + return "Invalid Record" + case UnstableOffsetCommit: + return "Unstable Offset Commit" + case ThrottlingQuotaExceeded: + return "Throttling Quota Exceeded" + case ProducerFenced: + return "Producer Fenced" + case 
ResourceNotFound: + return "Resource Not Found" + case DuplicateResource: + return "Duplicate Resource" + case UnacceptableCredential: + return "Unacceptable Credential" + case InconsistentVoterSet: + return "Inconsistent Voter Set" + case InvalidUpdateVersion: + return "Invalid Update Version" + case FeatureUpdateFailed: + return "Feature Update Failed" + case PrincipalDeserializationFailure: + return "Principal Deserialization Failure" + case SnapshotNotFound: + return "Snapshot Not Found" + case PositionOutOfRange: + return "Position Out Of Range" + case UnknownTopicID: + return "Unknown Topic ID" + case DuplicateBrokerRegistration: + return "Duplicate Broker Registration" + case BrokerIDNotRegistered: + return "Broker ID Not Registered" + case InconsistentTopicID: + return "Inconsistent Topic ID" + case InconsistentClusterID: + return "Inconsistent Cluster ID" + case TransactionalIDNotFound: + return "Transactional ID Not Found" + case FetchSessionTopicIDError: + return "Fetch Session Topic ID Error" + } + return "" +} + +// Description returns a human readable description of cause of the error. +func (e Error) Description() string { + switch e { + case Unknown: + return "an unexpected server error occurred" + case OffsetOutOfRange: + return "the requested offset is outside the range of offsets maintained by the server for the given topic/partition" + case InvalidMessage: + return "the message contents does not match its CRC" + case UnknownTopicOrPartition: + return "the request is for a topic or partition that does not exist on this broker" + case InvalidMessageSize: + return "the message has a negative size" + case LeaderNotAvailable: + return "the cluster is in the middle of a leadership election and there is currently no leader for this partition and hence it is unavailable for writes" + case NotLeaderForPartition: + return "the client attempted to send messages to a replica that is not the leader for some partition, the client's metadata are likely out of date" + case RequestTimedOut: + return "the request exceeded the user-specified time limit in the request" + case BrokerNotAvailable: + return "not a client facing error and is used mostly by tools when a broker is not alive" + case ReplicaNotAvailable: + return "a replica is expected on a broker, but is not (this can be safely ignored)" + case MessageSizeTooLarge: + return "the server has a configurable maximum message size to avoid unbounded memory allocation and the client attempted to produce a message larger than this maximum" + case StaleControllerEpoch: + return "internal error code for broker-to-broker communication" + case OffsetMetadataTooLarge: + return "the client specified a string larger than configured maximum for offset metadata" + case GroupLoadInProgress: + return "the broker returns this error code for an offset fetch request if it is still loading offsets (after a leader change for that offsets topic partition), or in response to group membership requests (such as heartbeats) when group metadata is being loaded by the coordinator" + case GroupCoordinatorNotAvailable: + return "the broker returns this error code for group coordinator requests, offset commits, and most group management requests if the offsets topic has not yet been created, or if the group coordinator is not active" + case NotCoordinatorForGroup: + return "the broker returns this error code if it receives an offset fetch or commit request for a group that it is not a coordinator for" + case InvalidTopic: + return "a request which attempted to 
access an invalid topic (e.g. one which has an illegal name), or if an attempt was made to write to an internal topic (such as the consumer offsets topic)"
+ case RecordListTooLarge:
+ return "a message batch in a produce request exceeds the maximum configured segment size"
+ case NotEnoughReplicas:
+ return "the number of in-sync replicas is lower than the configured minimum and requiredAcks is -1"
+ case NotEnoughReplicasAfterAppend:
+ return "the message was written to the log, but with fewer in-sync replicas than required."
+ case InvalidRequiredAcks:
+ return "the requested requiredAcks is invalid (anything other than -1, 1, or 0)"
+ case IllegalGeneration:
+ return "the generation id provided in the request is not the current generation"
+ case InconsistentGroupProtocol:
+ return "the member provided a protocol type or set of protocols which is not compatible with the current group"
+ case InvalidGroupId:
+ return "the group id is empty or null"
+ case UnknownMemberId:
+ return "the member id is not in the current generation"
+ case InvalidSessionTimeout:
+ return "the requested session timeout is outside of the allowed range on the broker"
+ case RebalanceInProgress:
+ return "the coordinator has begun rebalancing the group, the client should rejoin the group"
+ case InvalidCommitOffsetSize:
+ return "an offset commit was rejected because of oversize metadata"
+ case TopicAuthorizationFailed:
+ return "the client is not authorized to access the requested topic"
+ case GroupAuthorizationFailed:
+ return "the client is not authorized to access a particular group id"
+ case ClusterAuthorizationFailed:
+ return "the client is not authorized to use an inter-broker or administrative API"
+ case InvalidTimestamp:
+ return "the timestamp of the message is out of acceptable range"
+ case UnsupportedSASLMechanism:
+ return "the broker does not support the requested SASL mechanism"
+ case IllegalSASLState:
+ return "the request is not valid given the current SASL state"
+ case UnsupportedVersion:
+ return "the version of API is not supported"
+ case TopicAlreadyExists:
+ return "a topic with this name already exists"
+ case InvalidPartitionNumber:
+ return "the number of partitions is invalid"
+ case InvalidReplicationFactor:
+ return "the replication-factor is invalid"
+ case InvalidReplicaAssignment:
+ return "the replica assignment is invalid"
+ case InvalidConfiguration:
+ return "the configuration is invalid"
+ case NotController:
+ return "this is not the correct controller for this cluster"
+ case InvalidRequest:
+ return "this most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker, see the broker logs for more details"
+ case UnsupportedForMessageFormat:
+ return "the message format version on the broker does not support the request"
+ case PolicyViolation:
+ return "the request parameters do not satisfy the configured policy"
+ case OutOfOrderSequenceNumber:
+ return "the broker received an out of order sequence number"
+ case DuplicateSequenceNumber:
+ return "the broker received a duplicate sequence number"
+ case InvalidProducerEpoch:
+ return "the producer attempted an operation with an old epoch, either there is a newer producer with the same transactional ID, or the producer's transaction has been expired by the broker"
+ case InvalidTransactionState:
+ return "the producer attempted a transactional operation in an invalid state"
+ case InvalidProducerIDMapping:
+ return "the producer attempted to use a
producer id which is not currently assigned to its transactional ID"
+ case InvalidTransactionTimeout:
+ return "the transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)"
+ case ConcurrentTransactions:
+ return "the producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing"
+ case TransactionCoordinatorFenced:
+ return "the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer"
+ case TransactionalIDAuthorizationFailed:
+ return "the transactional ID authorization failed"
+ case SecurityDisabled:
+ return "the security features are disabled"
+ case BrokerAuthorizationFailed:
+ return "the broker authorization failed"
+ case KafkaStorageError:
+ return "disk error when trying to access log file on the disk"
+ case LogDirNotFound:
+ return "the user-specified log directory is not found in the broker config"
+ case SASLAuthenticationFailed:
+ return "SASL Authentication failed"
+ case UnknownProducerId:
+ return "the broker could not locate the producer metadata associated with the producer ID"
+ case ReassignmentInProgress:
+ return "a partition reassignment is in progress"
+ case DelegationTokenAuthDisabled:
+ return "delegation token feature is not enabled"
+ case DelegationTokenNotFound:
+ return "delegation token is not found on server"
+ case DelegationTokenOwnerMismatch:
+ return "specified principal is not valid owner/renewer"
+ case DelegationTokenRequestNotAllowed:
+ return "delegation token requests are not allowed on plaintext/1-way ssl channels and on delegation token authenticated channels"
+ case DelegationTokenAuthorizationFailed:
+ return "delegation token authorization failed"
+ case DelegationTokenExpired:
+ return "delegation token is expired"
+ case InvalidPrincipalType:
+ return "supplied principal type is not supported"
+ case NonEmptyGroup:
+ return "the group is not empty"
+ case GroupIdNotFound:
+ return "the group ID does not exist"
+ case FetchSessionIDNotFound:
+ return "the fetch session ID was not found"
+ case InvalidFetchSessionEpoch:
+ return "the fetch session epoch is invalid"
+ case ListenerNotFound:
+ return "there is no listener on the leader broker that matches the listener on which metadata request was processed"
+ case TopicDeletionDisabled:
+ return "topic deletion is disabled"
+ case FencedLeaderEpoch:
+ return "the leader epoch in the request is older than the epoch on the broker"
+ case UnknownLeaderEpoch:
+ return "the leader epoch in the request is newer than the epoch on the broker"
+ case UnsupportedCompressionType:
+ return "the requesting client does not support the compression type of given partition"
+ case MemberIDRequired:
+ return "the group member needs to have a valid member id before actually entering a consumer group"
+ case EligibleLeadersNotAvailable:
+ return "eligible topic partition leaders are not available"
+ case ElectionNotNeeded:
+ return "leader election not needed for topic partition"
+ case NoReassignmentInProgress:
+ return "no partition reassignment is in progress"
+ case GroupSubscribedToTopic:
+ return "deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it"
+ case InvalidRecord:
+ return "this record has failed the validation on broker and hence will be rejected"
+ case UnstableOffsetCommit:
+ return "there are unstable offsets that need to be cleared"
+ case ThrottlingQuotaExceeded:
+ return "The throttling
quota has been exceeded"
+ case ProducerFenced:
+ return "There is a newer producer with the same transactionalId which fences the current one"
+ case ResourceNotFound:
+ return "A request illegally referred to a resource that does not exist"
+ case DuplicateResource:
+ return "A request illegally referred to the same resource twice"
+ case UnacceptableCredential:
+ return "Requested credential would not meet criteria for acceptability"
+ case InconsistentVoterSet:
+ return "Indicates that either the sender or recipient of a voter-only request is not one of the expected voters"
+ case InvalidUpdateVersion:
+ return "The given update version was invalid"
+ case FeatureUpdateFailed:
+ return "Unable to update finalized features due to an unexpected server error"
+ case PrincipalDeserializationFailure:
+ return "Request principal deserialization failed during forwarding. This indicates an internal error on the broker cluster security setup"
+ case SnapshotNotFound:
+ return "Requested snapshot was not found"
+ case PositionOutOfRange:
+ return "Requested position is not greater than or equal to zero, and less than the size of the snapshot"
+ case UnknownTopicID:
+ return "This server does not host this topic ID"
+ case DuplicateBrokerRegistration:
+ return "This broker ID is already in use"
+ case BrokerIDNotRegistered:
+ return "The given broker ID was not registered"
+ case InconsistentTopicID:
+ return "The log's topic ID did not match the topic ID in the request"
+ case InconsistentClusterID:
+ return "The clusterId in the request does not match that found on the server"
+ case TransactionalIDNotFound:
+ return "The transactionalId could not be found"
+ case FetchSessionTopicIDError:
+ return "The fetch session encountered inconsistent topic ID usage"
+ }
+ return ""
+}
+
+func isTimeout(err error) bool {
+ var timeoutError interface{ Timeout() bool }
+ if errors.As(err, &timeoutError) {
+ return timeoutError.Timeout()
+ }
+ return false
+}
+
+func isTemporary(err error) bool {
+ var tempError interface{ Temporary() bool }
+ if errors.As(err, &tempError) {
+ return tempError.Temporary()
+ }
+ return false
+}
+
+func isTransientNetworkError(err error) bool {
+ return errors.Is(err, io.ErrUnexpectedEOF) ||
+ errors.Is(err, syscall.ECONNREFUSED) ||
+ errors.Is(err, syscall.ECONNRESET) ||
+ errors.Is(err, syscall.EPIPE)
+}
+
+func silentEOF(err error) error {
+ if errors.Is(err, io.EOF) {
+ err = nil
+ }
+ return err
+}
+
+func dontExpectEOF(err error) error {
+ if errors.Is(err, io.EOF) {
+ return io.ErrUnexpectedEOF
+ }
+ return err
+}
+
+func coalesceErrors(errs ...error) error {
+ for _, err := range errs {
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type MessageTooLargeError struct {
+ Message Message
+ Remaining []Message
+}
+
+func messageTooLarge(msgs []Message, i int) MessageTooLargeError {
+ remain := make([]Message, 0, len(msgs)-1)
+ remain = append(remain, msgs[:i]...)
+ remain = append(remain, msgs[i+1:]...)
+ return MessageTooLargeError{
+ Message: msgs[i],
+ Remaining: remain,
+ }
+}
+
+func (e MessageTooLargeError) Error() string {
+ return MessageSizeTooLarge.Error()
+}
+
+func makeError(code int16, message string) error {
+ if code == 0 {
+ return nil
+ }
+ if message == "" {
+ return Error(code)
+ }
+ return fmt.Errorf("%w: %s", Error(code), message)
+}
+
+// WriteErrors is returned by kafka.(*Writer).WriteMessages when the writer is
+// not configured to write messages asynchronously.
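Aside: the `Error` type and its helpers above define how callers are meant to interrogate failures. A minimal sketch, assuming a placeholder broker and topic; `kafka.TCP`, `Client.Fetch`, and `FirstOffset` are part of the same library:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	kafka "github.com/segmentio/kafka-go"
)

// classify shows the intended error-inspection pattern: errors.Is against
// kafka error codes, errors.As to reach the Temporary/Timeout predicates.
func classify(err error) {
	switch {
	case err == nil:
		fmt.Println("ok")
	case errors.Is(err, kafka.TopicAuthorizationFailed):
		fmt.Println("not authorized:", err)
	default:
		var ke kafka.Error
		if errors.As(err, &ke) {
			fmt.Printf("kafka error %d (retriable=%v, timeout=%v): %s\n",
				ke, ke.Temporary(), ke.Timeout(), ke.Title())
		} else {
			fmt.Println("non-kafka error:", err)
		}
	}
}

func main() {
	client := &kafka.Client{Timeout: 5 * time.Second}
	_, err := client.Fetch(context.Background(), &kafka.FetchRequest{
		Addr:      kafka.TCP("localhost:9092"), // placeholder broker
		Topic:     "my-topic",                  // placeholder topic
		Partition: 0,
		Offset:    kafka.FirstOffset,
	})
	classify(err)
}
```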
WriteErrors values contain
+// a list of errors where each entry matches the position of a message in the
+// WriteMessages call. The program can determine the status of each message by
+// looping over the error:
+//
+// switch err := w.WriteMessages(ctx, msgs...).(type) {
+// case nil:
+// case kafka.WriteErrors:
+// for i := range msgs {
+// if err[i] != nil {
+// // handle the error writing msgs[i]
+// ...
+// }
+// }
+// default:
+// // handle other errors
+// ...
+// }
+type WriteErrors []error
+
+// Count counts the number of non-nil errors in err.
+func (err WriteErrors) Count() int {
+ n := 0
+
+ for _, e := range err {
+ if e != nil {
+ n++
+ }
+ }
+
+ return n
+}
+
+func (err WriteErrors) Error() string {
+ errCount := err.Count()
+ errors := make([]string, 0, errCount)
+ for _, writeError := range err {
+ if writeError == nil {
+ continue
+ }
+ errors = append(errors, writeError.Error())
+ }
+ return fmt.Sprintf("Kafka write errors (%d/%d), errors: %v", errCount, len(err), errors)
+}
diff --git a/vendor/github.com/segmentio/kafka-go/fetch.go b/vendor/github.com/segmentio/kafka-go/fetch.go
new file mode 100644
index 00000000000..eafd0de88f2
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/fetch.go
@@ -0,0 +1,289 @@
+package kafka
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "net"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol"
+ fetchAPI "github.com/segmentio/kafka-go/protocol/fetch"
+)
+
+// FetchRequest represents a request sent to a kafka broker to retrieve records
+// from a topic partition.
+type FetchRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // Topic, partition, and offset to retrieve records from. The offset may be
+ // one of the special FirstOffset or LastOffset constants, in which case the
+ // request will automatically discover the first or last offset of the
+ // partition and submit the request for these.
+ Topic string
+ Partition int
+ Offset int64
+
+ // Size and time limits of the response returned by the broker.
+ MinBytes int64
+ MaxBytes int64
+ MaxWait time.Duration
+
+ // The isolation level for the request.
+ //
+ // Defaults to ReadUncommitted.
+ //
+ // This field requires the kafka broker to support the Fetch API in version
+ // 4 or above (otherwise the value is ignored).
+ IsolationLevel IsolationLevel
+}
+
+// FetchResponse represents a response from a kafka broker to a fetch request.
+type FetchResponse struct {
+ // The amount of time that the broker throttled the request.
+ Throttle time.Duration
+
+ // The topic and partition that the response came for (will match the values
+ // in the request).
+ Topic string
+ Partition int
+
+ // Information about the topic partition layout returned from the broker.
+ //
+ // LastStableOffset requires the kafka broker to support the Fetch API in
+ // version 4 or above (otherwise the value is zero).
+ //
+ // LogStartOffset requires the kafka broker to support the Fetch API in
+ // version 5 or above (otherwise the value is zero).
+ HighWatermark int64
+ LastStableOffset int64
+ LogStartOffset int64
+
+ // An error that may have occurred while attempting to fetch the records.
+ //
+ // The error contains both the kafka error code, and an error message
+ // returned by the kafka broker. Programs may use the standard errors.Is
+ // function to test the error against kafka error codes.
+ Error error
+
+ // The set of records returned in the response.
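Aside, before the `Fetch` implementation that follows: a usage sketch of the request/response pair just defined. Broker address and topic are placeholders, and the record-draining loop assumes the library's `RecordReader` contract (`ReadRecord` until `io.EOF`) and the `Offset` field of its `Record` type, both defined elsewhere in kafka-go:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"time"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Timeout: 10 * time.Second}

	// Fetch a batch of records starting at the first available offset.
	res, err := client.Fetch(context.Background(), &kafka.FetchRequest{
		Addr:      kafka.TCP("localhost:9092"), // placeholder broker
		Topic:     "my-topic",                  // placeholder topic
		Partition: 0,
		Offset:    kafka.FirstOffset,
		MinBytes:  1,
		MaxBytes:  1 << 20, // 1 MiB upper bound on the response
		MaxWait:   500 * time.Millisecond,
	})
	if err != nil {
		log.Fatal(err)
	}
	if res.Error != nil {
		log.Fatal(res.Error) // per-partition error is reported in-band
	}

	// Drain the RecordReader; io.EOF marks the end of the batch.
	for {
		rec, err := res.Records.ReadRecord()
		if err != nil {
			if err == io.EOF {
				break
			}
			log.Fatal(err)
		}
		fmt.Println("offset:", rec.Offset)
	}
}
```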
+ // + // The program is expected to call the RecordSet's Close method when it + // finished reading the records. + // + // Note that kafka may return record batches that start at an offset before + // the one that was requested. It is the program's responsibility to skip + // the offsets that it is not interested in. + Records RecordReader +} + +// Fetch sends a fetch request to a kafka broker and returns the response. +// +// If the broker returned an invalid response with no topics, an error wrapping +// protocol.ErrNoTopic is returned. +// +// If the broker returned an invalid response with no partitions, an error +// wrapping ErrNoPartitions is returned. +func (c *Client) Fetch(ctx context.Context, req *FetchRequest) (*FetchResponse, error) { + timeout := c.timeout(ctx, math.MaxInt64) + maxWait := req.maxWait() + + if maxWait < timeout { + timeout = maxWait + } + + offset := req.Offset + switch offset { + case FirstOffset, LastOffset: + topic, partition := req.Topic, req.Partition + + r, err := c.ListOffsets(ctx, &ListOffsetsRequest{ + Addr: req.Addr, + Topics: map[string][]OffsetRequest{ + topic: {{ + Partition: partition, + Timestamp: offset, + }}, + }, + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", err) + } + + for _, p := range r.Topics[topic] { + if p.Partition == partition { + if p.Error != nil { + return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", p.Error) + } + switch offset { + case FirstOffset: + offset = p.FirstOffset + case LastOffset: + offset = p.LastOffset + } + break + } + } + } + + m, err := c.roundTrip(ctx, req.Addr, &fetchAPI.Request{ + ReplicaID: -1, + MaxWaitTime: milliseconds(timeout), + MinBytes: int32(req.MinBytes), + MaxBytes: int32(req.MaxBytes), + IsolationLevel: int8(req.IsolationLevel), + SessionID: -1, + SessionEpoch: -1, + Topics: []fetchAPI.RequestTopic{{ + Topic: req.Topic, + Partitions: []fetchAPI.RequestPartition{{ + Partition: int32(req.Partition), + CurrentLeaderEpoch: -1, + FetchOffset: offset, + LogStartOffset: -1, + PartitionMaxBytes: int32(req.MaxBytes), + }}, + }}, + }) + + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", err) + } + + res := m.(*fetchAPI.Response) + if len(res.Topics) == 0 { + return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", protocol.ErrNoTopic) + } + topic := &res.Topics[0] + if len(topic.Partitions) == 0 { + return nil, fmt.Errorf("kafka.(*Client).Fetch: %w", protocol.ErrNoPartition) + } + partition := &topic.Partitions[0] + + ret := &FetchResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Topic: topic.Topic, + Partition: int(partition.Partition), + Error: makeError(res.ErrorCode, ""), + HighWatermark: partition.HighWatermark, + LastStableOffset: partition.LastStableOffset, + LogStartOffset: partition.LogStartOffset, + Records: partition.RecordSet.Records, + } + + if partition.ErrorCode != 0 { + ret.Error = makeError(partition.ErrorCode, "") + } + + if ret.Records == nil { + ret.Records = NewRecordReader() + } + + return ret, nil +} + +func (req *FetchRequest) maxWait() time.Duration { + if req.MaxWait > 0 { + return req.MaxWait + } + return defaultMaxWait +} + +type fetchRequestV2 struct { + ReplicaID int32 + MaxWaitTime int32 + MinBytes int32 + Topics []fetchRequestTopicV2 +} + +func (r fetchRequestV2) size() int32 { + return 4 + 4 + 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) +} + +func (r fetchRequestV2) writeTo(wb *writeBuffer) { + wb.writeInt32(r.ReplicaID) + wb.writeInt32(r.MaxWaitTime) + wb.writeInt32(r.MinBytes) + 
wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) }) +} + +type fetchRequestTopicV2 struct { + TopicName string + Partitions []fetchRequestPartitionV2 +} + +func (t fetchRequestTopicV2) size() int32 { + return sizeofString(t.TopicName) + + sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) +} + +func (t fetchRequestTopicV2) writeTo(wb *writeBuffer) { + wb.writeString(t.TopicName) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) +} + +type fetchRequestPartitionV2 struct { + Partition int32 + FetchOffset int64 + MaxBytes int32 +} + +func (p fetchRequestPartitionV2) size() int32 { + return 4 + 8 + 4 +} + +func (p fetchRequestPartitionV2) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt64(p.FetchOffset) + wb.writeInt32(p.MaxBytes) +} + +type fetchResponseV2 struct { + ThrottleTime int32 + Topics []fetchResponseTopicV2 +} + +func (r fetchResponseV2) size() int32 { + return 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) +} + +func (r fetchResponseV2) writeTo(wb *writeBuffer) { + wb.writeInt32(r.ThrottleTime) + wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) }) +} + +type fetchResponseTopicV2 struct { + TopicName string + Partitions []fetchResponsePartitionV2 +} + +func (t fetchResponseTopicV2) size() int32 { + return sizeofString(t.TopicName) + + sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) +} + +func (t fetchResponseTopicV2) writeTo(wb *writeBuffer) { + wb.writeString(t.TopicName) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) +} + +type fetchResponsePartitionV2 struct { + Partition int32 + ErrorCode int16 + HighwaterMarkOffset int64 + MessageSetSize int32 + MessageSet messageSet +} + +func (p fetchResponsePartitionV2) size() int32 { + return 4 + 2 + 8 + 4 + p.MessageSet.size() +} + +func (p fetchResponsePartitionV2) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt16(p.ErrorCode) + wb.writeInt64(p.HighwaterMarkOffset) + wb.writeInt32(p.MessageSetSize) + p.MessageSet.writeTo(wb) +} diff --git a/vendor/github.com/segmentio/kafka-go/findcoordinator.go b/vendor/github.com/segmentio/kafka-go/findcoordinator.go new file mode 100644 index 00000000000..cbf07153dc6 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/findcoordinator.go @@ -0,0 +1,170 @@ +package kafka + +import ( + "bufio" + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/findcoordinator" +) + +// CoordinatorKeyType is used to specify the type of coordinator to look for. +type CoordinatorKeyType int8 + +const ( + // CoordinatorKeyTypeConsumer type is used when looking for a Group coordinator. + CoordinatorKeyTypeConsumer CoordinatorKeyType = 0 + + // CoordinatorKeyTypeTransaction type is used when looking for a Transaction coordinator. + CoordinatorKeyTypeTransaction CoordinatorKeyType = 1 +) + +// FindCoordinatorRequest is the request structure for the FindCoordinator function. +type FindCoordinatorRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // The coordinator key. + Key string + + // The coordinator key type. (Group, transaction, etc.) + KeyType CoordinatorKeyType +} + +// FindCoordinatorResponseCoordinator contains details about the found coordinator. +type FindCoordinatorResponseCoordinator struct { + // NodeID holds the broker id. 
+ NodeID int + + // Host of the broker + Host string + + // Port on which broker accepts requests + Port int +} + +// FindCoordinatorResponse is the response structure for the FindCoordinator function. +type FindCoordinatorResponse struct { + // The Transaction/Group Coordinator details + Coordinator *FindCoordinatorResponseCoordinator + + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // An error that may have occurred while attempting to retrieve Coordinator + // + // The error contains both the kafka error code, and an error message + // returned by the kafka broker. + Error error +} + +// FindCoordinator sends a findCoordinator request to a kafka broker and returns the +// response. +func (c *Client) FindCoordinator(ctx context.Context, req *FindCoordinatorRequest) (*FindCoordinatorResponse, error) { + + m, err := c.roundTrip(ctx, req.Addr, &findcoordinator.Request{ + Key: req.Key, + KeyType: int8(req.KeyType), + }) + + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).FindCoordinator: %w", err) + } + + res := m.(*findcoordinator.Response) + coordinator := &FindCoordinatorResponseCoordinator{ + NodeID: int(res.NodeID), + Host: res.Host, + Port: int(res.Port), + } + ret := &FindCoordinatorResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Error: makeError(res.ErrorCode, res.ErrorMessage), + Coordinator: coordinator, + } + + return ret, nil +} + +// FindCoordinatorRequestV0 requests the coordinator for the specified group or transaction +// +// See http://kafka.apache.org/protocol.html#The_Messages_FindCoordinator +type findCoordinatorRequestV0 struct { + // CoordinatorKey holds id to use for finding the coordinator (for groups, this is + // the groupId, for transactional producers, this is the transactional id) + CoordinatorKey string +} + +func (t findCoordinatorRequestV0) size() int32 { + return sizeofString(t.CoordinatorKey) +} + +func (t findCoordinatorRequestV0) writeTo(wb *writeBuffer) { + wb.writeString(t.CoordinatorKey) +} + +type findCoordinatorResponseCoordinatorV0 struct { + // NodeID holds the broker id. 
+ NodeID int32 + + // Host of the broker + Host string + + // Port on which broker accepts requests + Port int32 +} + +func (t findCoordinatorResponseCoordinatorV0) size() int32 { + return sizeofInt32(t.NodeID) + + sizeofString(t.Host) + + sizeofInt32(t.Port) +} + +func (t findCoordinatorResponseCoordinatorV0) writeTo(wb *writeBuffer) { + wb.writeInt32(t.NodeID) + wb.writeString(t.Host) + wb.writeInt32(t.Port) +} + +func (t *findCoordinatorResponseCoordinatorV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readInt32(r, size, &t.NodeID); err != nil { + return + } + if remain, err = readString(r, remain, &t.Host); err != nil { + return + } + if remain, err = readInt32(r, remain, &t.Port); err != nil { + return + } + return +} + +type findCoordinatorResponseV0 struct { + // ErrorCode holds response error code + ErrorCode int16 + + // Coordinator holds host and port information for the coordinator + Coordinator findCoordinatorResponseCoordinatorV0 +} + +func (t findCoordinatorResponseV0) size() int32 { + return sizeofInt16(t.ErrorCode) + + t.Coordinator.size() +} + +func (t findCoordinatorResponseV0) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) + t.Coordinator.writeTo(wb) +} + +func (t *findCoordinatorResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readInt16(r, size, &t.ErrorCode); err != nil { + return + } + if remain, err = (&t.Coordinator).readFrom(r, remain); err != nil { + return + } + return +} diff --git a/vendor/github.com/segmentio/kafka-go/groupbalancer.go b/vendor/github.com/segmentio/kafka-go/groupbalancer.go new file mode 100644 index 00000000000..9491bc501cc --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/groupbalancer.go @@ -0,0 +1,339 @@ +package kafka + +import ( + "sort" +) + +// GroupMember describes a single participant in a consumer group. +type GroupMember struct { + // ID is the unique ID for this member as taken from the JoinGroup response. + ID string + + // Topics is a list of topics that this member is consuming. + Topics []string + + // UserData contains any information that the GroupBalancer sent to the + // consumer group coordinator. + UserData []byte +} + +// GroupMemberAssignments holds MemberID => topic => partitions. +type GroupMemberAssignments map[string]map[string][]int + +// GroupBalancer encapsulates the client side rebalancing logic. +type GroupBalancer interface { + // ProtocolName of the GroupBalancer + ProtocolName() string + + // UserData provides the GroupBalancer an opportunity to embed custom + // UserData into the metadata. + // + // Will be used by JoinGroup to begin the consumer group handshake. 
+ //
+ // See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-JoinGroupRequest
+ UserData() ([]byte, error)
+
+ // AssignGroups returns which members will be consuming
+ // which topic partitions
+ AssignGroups(members []GroupMember, partitions []Partition) GroupMemberAssignments
+}
+
+// RangeGroupBalancer groups consumers by partition
+//
+// Example: 5 partitions, 2 consumers
+// C0: [0, 1, 2]
+// C1: [3, 4]
+//
+// Example: 6 partitions, 3 consumers
+// C0: [0, 1]
+// C1: [2, 3]
+// C2: [4, 5]
+//
+type RangeGroupBalancer struct{}
+
+func (r RangeGroupBalancer) ProtocolName() string {
+ return "range"
+}
+
+func (r RangeGroupBalancer) UserData() ([]byte, error) {
+ return nil, nil
+}
+
+func (r RangeGroupBalancer) AssignGroups(members []GroupMember, topicPartitions []Partition) GroupMemberAssignments {
+ groupAssignments := GroupMemberAssignments{}
+ membersByTopic := findMembersByTopic(members)
+
+ for topic, members := range membersByTopic {
+ partitions := findPartitions(topic, topicPartitions)
+ partitionCount := len(partitions)
+ memberCount := len(members)
+
+ for memberIndex, member := range members {
+ assignmentsByTopic, ok := groupAssignments[member.ID]
+ if !ok {
+ assignmentsByTopic = map[string][]int{}
+ groupAssignments[member.ID] = assignmentsByTopic
+ }
+
+ minIndex := memberIndex * partitionCount / memberCount
+ maxIndex := (memberIndex + 1) * partitionCount / memberCount
+
+ for partitionIndex, partition := range partitions {
+ if partitionIndex >= minIndex && partitionIndex < maxIndex {
+ assignmentsByTopic[topic] = append(assignmentsByTopic[topic], partition)
+ }
+ }
+ }
+ }
+
+ return groupAssignments
+}
+
+// RoundRobinGroupBalancer divides partitions evenly among consumers
+//
+// Example: 5 partitions, 2 consumers
+// C0: [0, 2, 4]
+// C1: [1, 3]
+//
+// Example: 6 partitions, 3 consumers
+// C0: [0, 3]
+// C1: [1, 4]
+// C2: [2, 5]
+//
+type RoundRobinGroupBalancer struct{}
+
+func (r RoundRobinGroupBalancer) ProtocolName() string {
+ return "roundrobin"
+}
+
+func (r RoundRobinGroupBalancer) UserData() ([]byte, error) {
+ return nil, nil
+}
+
+func (r RoundRobinGroupBalancer) AssignGroups(members []GroupMember, topicPartitions []Partition) GroupMemberAssignments {
+ groupAssignments := GroupMemberAssignments{}
+ membersByTopic := findMembersByTopic(members)
+ for topic, members := range membersByTopic {
+ partitionIDs := findPartitions(topic, topicPartitions)
+ memberCount := len(members)
+
+ for memberIndex, member := range members {
+ assignmentsByTopic, ok := groupAssignments[member.ID]
+ if !ok {
+ assignmentsByTopic = map[string][]int{}
+ groupAssignments[member.ID] = assignmentsByTopic
+ }
+
+ for partitionIndex, partition := range partitionIDs {
+ if (partitionIndex % memberCount) == memberIndex {
+ assignmentsByTopic[topic] = append(assignmentsByTopic[topic], partition)
+ }
+ }
+ }
+ }
+
+ return groupAssignments
+}
+
+// RackAffinityGroupBalancer makes a best effort to pair up consumers with
+// partitions whose leader is in the same rack. This strategy can have
+// performance benefits by minimizing round trip latency between the consumer
+// and the broker. In environments where network traffic across racks incurs
+// charges (such as cross AZ data transfer in AWS), this strategy is also a cost
+// optimization measure because it keeps network traffic within the local rack
+// where possible.
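Aside: `AssignGroups` on these balancers is a pure function, so it can be exercised offline. A minimal sketch reproducing the 6-partitions/3-consumers example from the range balancer's doc comment; the topic and member IDs are placeholders:

```go
package main

import (
	"fmt"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	// Three members subscribed to the same hypothetical topic.
	members := []kafka.GroupMember{
		{ID: "consumer-0", Topics: []string{"my-topic"}},
		{ID: "consumer-1", Topics: []string{"my-topic"}},
		{ID: "consumer-2", Topics: []string{"my-topic"}},
	}

	// Six partitions of that topic.
	partitions := make([]kafka.Partition, 6)
	for i := range partitions {
		partitions[i] = kafka.Partition{Topic: "my-topic", ID: i}
	}

	assignments := kafka.RangeGroupBalancer{}.AssignGroups(members, partitions)
	fmt.Println(assignments["consumer-0"]["my-topic"]) // [0 1]
	fmt.Println(assignments["consumer-1"]["my-topic"]) // [2 3]
	fmt.Println(assignments["consumer-2"]["my-topic"]) // [4 5]
}
```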
+// +// The primary objective is to spread partitions evenly across consumers with a +// secondary focus on maximizing the number of partitions where the leader and +// the consumer are in the same rack. For best affinity, it's recommended to +// have a balanced spread of consumers and partition leaders across racks. +// +// This balancer requires Kafka version 0.10.0.0+ or later. Earlier versions do +// not return the brokers' racks in the metadata request. +type RackAffinityGroupBalancer struct { + // Rack is the name of the rack where this consumer is running. It will be + // communicated to the consumer group leader via the UserData so that + // assignments can be made with affinity to the partition leader. + Rack string +} + +func (r RackAffinityGroupBalancer) ProtocolName() string { + return "rack-affinity" +} + +func (r RackAffinityGroupBalancer) AssignGroups(members []GroupMember, partitions []Partition) GroupMemberAssignments { + membersByTopic := make(map[string][]GroupMember) + for _, m := range members { + for _, t := range m.Topics { + membersByTopic[t] = append(membersByTopic[t], m) + } + } + + partitionsByTopic := make(map[string][]Partition) + for _, p := range partitions { + partitionsByTopic[p.Topic] = append(partitionsByTopic[p.Topic], p) + } + + assignments := GroupMemberAssignments{} + for topic := range membersByTopic { + topicAssignments := r.assignTopic(membersByTopic[topic], partitionsByTopic[topic]) + for member, parts := range topicAssignments { + memberAssignments, ok := assignments[member] + if !ok { + memberAssignments = make(map[string][]int) + assignments[member] = memberAssignments + } + memberAssignments[topic] = parts + } + } + return assignments +} + +func (r RackAffinityGroupBalancer) UserData() ([]byte, error) { + return []byte(r.Rack), nil +} + +func (r *RackAffinityGroupBalancer) assignTopic(members []GroupMember, partitions []Partition) map[string][]int { + zonedPartitions := make(map[string][]int) + for _, part := range partitions { + zone := part.Leader.Rack + zonedPartitions[zone] = append(zonedPartitions[zone], part.ID) + } + + zonedConsumers := make(map[string][]string) + for _, member := range members { + zone := string(member.UserData) + zonedConsumers[zone] = append(zonedConsumers[zone], member.ID) + } + + targetPerMember := len(partitions) / len(members) + remainder := len(partitions) % len(members) + assignments := make(map[string][]int) + + // assign as many as possible in zone. this will assign up to partsPerMember + // to each consumer. it will also prefer to allocate remainder partitions + // in zone if possible. + for zone, parts := range zonedPartitions { + consumers := zonedConsumers[zone] + if len(consumers) == 0 { + continue + } + + // don't over-allocate. cap partition assignments at the calculated + // target. + partsPerMember := len(parts) / len(consumers) + if partsPerMember > targetPerMember { + partsPerMember = targetPerMember + } + + for _, consumer := range consumers { + assignments[consumer] = append(assignments[consumer], parts[:partsPerMember]...) + parts = parts[partsPerMember:] + } + + // if we had enough partitions for each consumer in this zone to hit its + // target, attempt to use any leftover partitions to satisfy the total + // remainder by adding at most 1 partition per consumer. 
+ leftover := len(parts)
+ if partsPerMember == targetPerMember {
+ if leftover > remainder {
+ leftover = remainder
+ }
+ if leftover > len(consumers) {
+ leftover = len(consumers)
+ }
+ remainder -= leftover
+ }
+
+ // this loop covers the case where we're assigning extra partitions or
+ // if there weren't enough to satisfy the targetPerMember and the zoned
+ // partitions didn't divide evenly.
+ for i := 0; i < leftover; i++ {
+ assignments[consumers[i]] = append(assignments[consumers[i]], parts[i])
+ }
+ parts = parts[leftover:]
+
+ if len(parts) == 0 {
+ delete(zonedPartitions, zone)
+ } else {
+ zonedPartitions[zone] = parts
+ }
+ }
+
+ // assign out remainders regardless of zone.
+ var remaining []int
+ for _, partitions := range zonedPartitions {
+ remaining = append(remaining, partitions...)
+ }
+
+ for _, member := range members {
+ assigned := assignments[member.ID]
+ delta := targetPerMember - len(assigned)
+ // if it were possible to assign the remainder in zone, it's been taken
+ // care of already. now we will portion out any remainder to a member
+ // that can take it.
+ if delta >= 0 && remainder > 0 {
+ delta++
+ remainder--
+ }
+ if delta > 0 {
+ assignments[member.ID] = append(assigned, remaining[:delta]...)
+ remaining = remaining[delta:]
+ }
+ }
+
+ return assignments
+}
+
+// findPartitions extracts the partition ids associated with the topic from the
+// list of Partitions provided.
+func findPartitions(topic string, partitions []Partition) []int {
+ var ids []int
+ for _, partition := range partitions {
+ if partition.Topic == topic {
+ ids = append(ids, partition.ID)
+ }
+ }
+ return ids
+}
+
+// findMembersByTopic groups the memberGroupMetadata by topic.
+func findMembersByTopic(members []GroupMember) map[string][]GroupMember {
+ membersByTopic := map[string][]GroupMember{}
+ for _, member := range members {
+ for _, topic := range member.Topics {
+ membersByTopic[topic] = append(membersByTopic[topic], member)
+ }
+ }
+
+ // normalize ordering of members to enable grouping across topics by partitions
+ //
+ // Want:
+ // C0 [T0/P0, T1/P0]
+ // C1 [T0/P1, T1/P1]
+ //
+ // Not:
+ // C0 [T0/P0, T1/P1]
+ // C1 [T0/P1, T1/P0]
+ //
+ // Even though the latter is still round robin, the partitions are crossed
+ //
+ for _, members := range membersByTopic {
+ sort.Slice(members, func(i, j int) bool {
+ return members[i].ID < members[j].ID
+ })
+ }
+
+ return membersByTopic
+}
+
+// findGroupBalancer returns the GroupBalancer with the specified protocolName
+// from the slice provided.
+func findGroupBalancer(protocolName string, balancers []GroupBalancer) (GroupBalancer, bool) {
+ for _, balancer := range balancers {
+ if balancer.ProtocolName() == protocolName {
+ return balancer, true
+ }
+ }
+ return nil, false
+}
diff --git a/vendor/github.com/segmentio/kafka-go/heartbeat.go b/vendor/github.com/segmentio/kafka-go/heartbeat.go
new file mode 100644
index 00000000000..a0444dae14b
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/heartbeat.go
@@ -0,0 +1,109 @@
+package kafka
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "net"
+ "time"
+
+ heartbeatAPI "github.com/segmentio/kafka-go/protocol/heartbeat"
+)
+
+// HeartbeatRequest represents a heartbeat sent to kafka to indicate consumer liveness.
+type HeartbeatRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // GroupID is the ID of the group.
+ GroupID string
+
+ // GenerationID is the current generation for the group.
+ GenerationID int32 + + // MemberID is the ID of the group member. + MemberID string + + // GroupInstanceID is a unique identifier for the consumer. + GroupInstanceID string +} + +// HeartbeatResponse represents a response from a heartbeat request. +type HeartbeatResponse struct { + // Error is set to non-nil if an error occurred. + Error error + + // The amount of time that the broker throttled the request. + // + // This field will be zero if the kafka broker did not support the + // Heartbeat API in version 1 or above. + Throttle time.Duration +} + +type heartbeatRequestV0 struct { + // GroupID holds the unique group identifier + GroupID string + + // GenerationID holds the generation of the group. + GenerationID int32 + + // MemberID assigned by the group coordinator + MemberID string +} + +// Heartbeat sends a heartbeat request to a kafka broker and returns the response. +func (c *Client) Heartbeat(ctx context.Context, req *HeartbeatRequest) (*HeartbeatResponse, error) { + m, err := c.roundTrip(ctx, req.Addr, &heartbeatAPI.Request{ + GroupID: req.GroupID, + GenerationID: req.GenerationID, + MemberID: req.MemberID, + GroupInstanceID: req.GroupInstanceID, + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).Heartbeat: %w", err) + } + + res := m.(*heartbeatAPI.Response) + + ret := &HeartbeatResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + } + + if res.ErrorCode != 0 { + ret.Error = Error(res.ErrorCode) + } + + return ret, nil +} + +func (t heartbeatRequestV0) size() int32 { + return sizeofString(t.GroupID) + + sizeofInt32(t.GenerationID) + + sizeofString(t.MemberID) +} + +func (t heartbeatRequestV0) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeInt32(t.GenerationID) + wb.writeString(t.MemberID) +} + +type heartbeatResponseV0 struct { + // ErrorCode holds response error code + ErrorCode int16 +} + +func (t heartbeatResponseV0) size() int32 { + return sizeofInt16(t.ErrorCode) +} + +func (t heartbeatResponseV0) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) +} + +func (t *heartbeatResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil { + return + } + return +} diff --git a/vendor/github.com/segmentio/kafka-go/incrementalalterconfigs.go b/vendor/github.com/segmentio/kafka-go/incrementalalterconfigs.go new file mode 100644 index 00000000000..a14714d87e6 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/incrementalalterconfigs.go @@ -0,0 +1,133 @@ +package kafka + +import ( + "context" + "net" + + "github.com/segmentio/kafka-go/protocol/incrementalalterconfigs" +) + +type ConfigOperation int8 + +const ( + ConfigOperationSet ConfigOperation = 0 + ConfigOperationDelete ConfigOperation = 1 + ConfigOperationAppend ConfigOperation = 2 + ConfigOperationSubtract ConfigOperation = 3 +) + +// IncrementalAlterConfigsRequest is a request to the IncrementalAlterConfigs API. +type IncrementalAlterConfigsRequest struct { + // Addr is the address of the kafka broker to send the request to. + Addr net.Addr + + // Resources contains the list of resources to update configs for. + Resources []IncrementalAlterConfigsRequestResource + + // ValidateOnly indicates whether Kafka should validate the changes without actually + // applying them. + ValidateOnly bool +} + +// IncrementalAlterConfigsRequestResource contains the details of a single resource type whose +// configs should be altered. 
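Aside: a minimal sketch of the Heartbeat API defined above. Broker, group, generation, and member IDs are placeholders; a real consumer obtains the latter two from the JoinGroup exchange added later in this diff:

```go
package main

import (
	"context"
	"log"
	"time"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Timeout: 5 * time.Second}

	res, err := client.Heartbeat(context.Background(), &kafka.HeartbeatRequest{
		Addr:         kafka.TCP("localhost:9092"), // placeholder broker
		GroupID:      "my-group",                  // placeholder group
		GenerationID: 1,                           // placeholder generation
		MemberID:     "member-1",                  // placeholder member
	})
	if err != nil {
		log.Fatal(err)
	}
	if res.Error != nil {
		// e.g. kafka.RebalanceInProgress signals the consumer should rejoin.
		log.Fatal(res.Error)
	}
}
```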
+type IncrementalAlterConfigsRequestResource struct { + // ResourceType is the type of resource to update. + ResourceType ResourceType + + // ResourceName is the name of the resource to update (i.e., topic name or broker ID). + ResourceName string + + // Configs contains the list of config key/values to update. + Configs []IncrementalAlterConfigsRequestConfig +} + +// IncrementalAlterConfigsRequestConfig describes a single config key/value pair that should +// be altered. +type IncrementalAlterConfigsRequestConfig struct { + // Name is the name of the config. + Name string + + // Value is the value to set for this config. + Value string + + // ConfigOperation indicates how this config should be updated (e.g., add, delete, etc.). + ConfigOperation ConfigOperation +} + +// IncrementalAlterConfigsResponse is a response from the IncrementalAlterConfigs API. +type IncrementalAlterConfigsResponse struct { + // Resources contains details of each resource config that was updated. + Resources []IncrementalAlterConfigsResponseResource +} + +// IncrementalAlterConfigsResponseResource contains the response details for a single resource +// whose configs were updated. +type IncrementalAlterConfigsResponseResource struct { + // Error is set to a non-nil value if an error occurred while updating this specific + // config. + Error error + + // ResourceType is the type of resource that was updated. + ResourceType ResourceType + + // ResourceName is the name of the resource that was updated. + ResourceName string +} + +func (c *Client) IncrementalAlterConfigs( + ctx context.Context, + req *IncrementalAlterConfigsRequest, +) (*IncrementalAlterConfigsResponse, error) { + apiReq := &incrementalalterconfigs.Request{ + ValidateOnly: req.ValidateOnly, + } + + for _, res := range req.Resources { + apiRes := incrementalalterconfigs.RequestResource{ + ResourceType: int8(res.ResourceType), + ResourceName: res.ResourceName, + } + + for _, config := range res.Configs { + apiRes.Configs = append( + apiRes.Configs, + incrementalalterconfigs.RequestConfig{ + Name: config.Name, + Value: config.Value, + ConfigOperation: int8(config.ConfigOperation), + }, + ) + } + + apiReq.Resources = append( + apiReq.Resources, + apiRes, + ) + } + + protoResp, err := c.roundTrip( + ctx, + req.Addr, + apiReq, + ) + if err != nil { + return nil, err + } + + resp := &IncrementalAlterConfigsResponse{} + + apiResp := protoResp.(*incrementalalterconfigs.Response) + for _, res := range apiResp.Responses { + resp.Resources = append( + resp.Resources, + IncrementalAlterConfigsResponseResource{ + Error: makeError(res.ErrorCode, res.ErrorMessage), + ResourceType: ResourceType(res.ResourceType), + ResourceName: res.ResourceName, + }, + ) + } + + return resp, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/initproducerid.go b/vendor/github.com/segmentio/kafka-go/initproducerid.go new file mode 100644 index 00000000000..5cc6b8f243f --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/initproducerid.go @@ -0,0 +1,82 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/initproducerid" +) + +// InitProducerIDRequest is the request structure for the InitProducerId function. +type InitProducerIDRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // The transactional id key. + TransactionalID string + + // Time after which a transaction should time out + TransactionTimeoutMs int + + // The Producer ID (PID). 
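Aside: the IncrementalAlterConfigs API above pairs with the library's `ResourceType` constants (defined elsewhere in kafka-go). A dry-run sketch that would set `retention.ms` on a placeholder topic against a placeholder broker:

```go
package main

import (
	"context"
	"log"
	"time"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	client := &kafka.Client{Timeout: 10 * time.Second}

	// ValidateOnly makes the broker check the change without applying it.
	res, err := client.IncrementalAlterConfigs(context.Background(), &kafka.IncrementalAlterConfigsRequest{
		Addr:         kafka.TCP("localhost:9092"), // placeholder broker
		ValidateOnly: true,
		Resources: []kafka.IncrementalAlterConfigsRequestResource{{
			ResourceType: kafka.ResourceTypeTopic,
			ResourceName: "my-topic", // placeholder topic
			Configs: []kafka.IncrementalAlterConfigsRequestConfig{{
				Name:            "retention.ms",
				Value:           "86400000", // one day
				ConfigOperation: kafka.ConfigOperationSet,
			}},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range res.Resources {
		log.Printf("%s: err=%v", r.ResourceName, r.Error)
	}
}
```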
+ // This is used to disambiguate requests if a transactional id is reused following its expiration. + // Only supported in version >=3 of the request, will be ignored otherwise. + ProducerID int + + // The producer's current epoch. + // This will be checked against the producer epoch on the broker, + // and the request will return an error if they do not match. + // Only supported in version >=3 of the request, will be ignored otherwise. + ProducerEpoch int +} + +// ProducerSession contains useful information about the producer session from the broker's response. +type ProducerSession struct { + // The Producer ID (PID) for the current producer session + ProducerID int + + // The epoch associated with the current producer session for the given PID + ProducerEpoch int +} + +// InitProducerIDResponse is the response structure for the InitProducerId function. +type InitProducerIDResponse struct { + // The Transaction/Group Coordinator details + Producer *ProducerSession + + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // An error that may have occurred while attempting to retrieve initProducerId + // + // The error contains both the kafka error code, and an error message + // returned by the kafka broker. + Error error +} + +// InitProducerID sends an initProducerId request to a kafka broker and returns the +// response. +func (c *Client) InitProducerID(ctx context.Context, req *InitProducerIDRequest) (*InitProducerIDResponse, error) { + m, err := c.roundTrip(ctx, req.Addr, &initproducerid.Request{ + TransactionalID: req.TransactionalID, + TransactionTimeoutMs: int32(req.TransactionTimeoutMs), + ProducerID: int64(req.ProducerID), + ProducerEpoch: int16(req.ProducerEpoch), + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).InitProducerId: %w", err) + } + + res := m.(*initproducerid.Response) + + return &InitProducerIDResponse{ + Producer: &ProducerSession{ + ProducerID: int(res.ProducerID), + ProducerEpoch: int(res.ProducerEpoch), + }, + Throttle: makeDuration(res.ThrottleTimeMs), + Error: makeError(res.ErrorCode, ""), + }, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/joingroup.go b/vendor/github.com/segmentio/kafka-go/joingroup.go new file mode 100644 index 00000000000..30823a69a78 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/joingroup.go @@ -0,0 +1,377 @@ +package kafka + +import ( + "bufio" + "bytes" + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol" + "github.com/segmentio/kafka-go/protocol/consumer" + "github.com/segmentio/kafka-go/protocol/joingroup" +) + +// JoinGroupRequest is the request structure for the JoinGroup function. +type JoinGroupRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // GroupID of the group to join. + GroupID string + + // The duration after which the coordinator considers the consumer dead + // if it has not received a heartbeat. + SessionTimeout time.Duration + + // The duration the coordinator will wait for each member to rejoin when rebalancing the group. + RebalanceTimeout time.Duration + + // The ID assigned by the group coordinator. + MemberID string + + // The unique identifier for the consumer instance. + GroupInstanceID string + + // The name for the class of protocols implemented by the group being joined. + ProtocolType string + + // The list of protocols the member supports.
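+ // +// For illustration only (values are hypothetical), a consumer subscribing to one topic with the "range" assignment strategy might set: +// +// Protocols: []GroupProtocol{{ +// Name: "range", +// Metadata: GroupProtocolSubscription{Topics: []string{"my-topic"}}, +// }}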
+ Protocols []GroupProtocol +} + +// GroupProtocol represents a consumer group protocol. +type GroupProtocol struct { + // The protocol name. + Name string + + // The protocol metadata. + Metadata GroupProtocolSubscription +} + +type GroupProtocolSubscription struct { + // The Topics to subscribe to. + Topics []string + + // UserData associated with the subscription for the given protocol + UserData []byte + + // Partitions owned by this consumer. + OwnedPartitions map[string][]int +} + +// JoinGroupResponse is the response structure for the JoinGroup function. +type JoinGroupResponse struct { + // An error that may have occurred when attempting to join the group. + // + // The errors contain the kafka error code. Programs may use the standard + // errors.Is function to test the error against kafka error codes. + Error error + + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // The generation ID of the group. + GenerationID int + + // The group protocol selected by the coordinator. + ProtocolName string + + // The group protocol name. + ProtocolType string + + // The leader of the group. + LeaderID string + + // The group member ID. + MemberID string + + // The members of the group. + Members []JoinGroupResponseMember +} + +// JoinGroupResponseMember represents a group member in a response to a JoinGroup request. +type JoinGroupResponseMember struct { + // The group member ID. + ID string + + // The unique identifier of the consumer instance. + GroupInstanceID string + + // The group member metadata. + Metadata GroupProtocolSubscription +} + +// JoinGroup sends a join group request to the coordinator and returns the response. +func (c *Client) JoinGroup(ctx context.Context, req *JoinGroupRequest) (*JoinGroupResponse, error) { + joinGroup := joingroup.Request{ + GroupID: req.GroupID, + SessionTimeoutMS: int32(req.SessionTimeout.Milliseconds()), + RebalanceTimeoutMS: int32(req.RebalanceTimeout.Milliseconds()), + MemberID: req.MemberID, + GroupInstanceID: req.GroupInstanceID, + ProtocolType: req.ProtocolType, + Protocols: make([]joingroup.RequestProtocol, 0, len(req.Protocols)), + } + + for _, proto := range req.Protocols { + protoMeta := consumer.Subscription{ + Version: consumer.MaxVersionSupported, + Topics: proto.Metadata.Topics, + UserData: proto.Metadata.UserData, + OwnedPartitions: make([]consumer.TopicPartition, 0, len(proto.Metadata.OwnedPartitions)), + } + for topic, partitions := range proto.Metadata.OwnedPartitions { + tp := consumer.TopicPartition{ + Topic: topic, + Partitions: make([]int32, 0, len(partitions)), + } + for _, partition := range partitions { + tp.Partitions = append(tp.Partitions, int32(partition)) + } + protoMeta.OwnedPartitions = append(protoMeta.OwnedPartitions, tp) + } + + metaBytes, err := protocol.Marshal(consumer.MaxVersionSupported, protoMeta) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).JoinGroup: %w", err) + } + + joinGroup.Protocols = append(joinGroup.Protocols, joingroup.RequestProtocol{ + Name: proto.Name, + Metadata: metaBytes, + }) + } + + m, err := c.roundTrip(ctx, req.Addr, &joinGroup) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).JoinGroup: %w", err) + } + + r := m.(*joingroup.Response) + + res := &JoinGroupResponse{ + Error: makeError(r.ErrorCode, ""), + Throttle: makeDuration(r.ThrottleTimeMS), + GenerationID: int(r.GenerationID), + ProtocolName: r.ProtocolName, + ProtocolType: r.ProtocolType, + LeaderID: r.LeaderID, + MemberID: r.MemberID, + Members:
make([]JoinGroupResponseMember, 0, len(r.Members)), + } + + for _, member := range r.Members { + var meta consumer.Subscription + err = protocol.Unmarshal(member.Metadata, consumer.MaxVersionSupported, &meta) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).JoinGroup: %w", err) + } + subscription := GroupProtocolSubscription{ + Topics: meta.Topics, + UserData: meta.UserData, + OwnedPartitions: make(map[string][]int, len(meta.OwnedPartitions)), + } + for _, owned := range meta.OwnedPartitions { + subscription.OwnedPartitions[owned.Topic] = make([]int, 0, len(owned.Partitions)) + for _, partition := range owned.Partitions { + subscription.OwnedPartitions[owned.Topic] = append(subscription.OwnedPartitions[owned.Topic], int(partition)) + } + } + res.Members = append(res.Members, JoinGroupResponseMember{ + ID: member.MemberID, + GroupInstanceID: member.GroupInstanceID, + Metadata: subscription, + }) + } + + return res, nil +} + +type groupMetadata struct { + Version int16 + Topics []string + UserData []byte +} + +func (t groupMetadata) size() int32 { + return sizeofInt16(t.Version) + + sizeofStringArray(t.Topics) + + sizeofBytes(t.UserData) +} + +func (t groupMetadata) writeTo(wb *writeBuffer) { + wb.writeInt16(t.Version) + wb.writeStringArray(t.Topics) + wb.writeBytes(t.UserData) +} + +func (t groupMetadata) bytes() []byte { + buf := bytes.NewBuffer(nil) + t.writeTo(&writeBuffer{w: buf}) + return buf.Bytes() +} + +func (t *groupMetadata) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readInt16(r, size, &t.Version); err != nil { + return + } + if remain, err = readStringArray(r, remain, &t.Topics); err != nil { + return + } + if remain, err = readBytes(r, remain, &t.UserData); err != nil { + return + } + return +} + +type joinGroupRequestGroupProtocolV1 struct { + ProtocolName string + ProtocolMetadata []byte +} + +func (t joinGroupRequestGroupProtocolV1) size() int32 { + return sizeofString(t.ProtocolName) + + sizeofBytes(t.ProtocolMetadata) +} + +func (t joinGroupRequestGroupProtocolV1) writeTo(wb *writeBuffer) { + wb.writeString(t.ProtocolName) + wb.writeBytes(t.ProtocolMetadata) +} + +type joinGroupRequestV1 struct { + // GroupID holds the unique group identifier + GroupID string + + // SessionTimeout holds the timeout in ms; the coordinator considers the + // consumer dead if it receives no heartbeat within this timeout. + SessionTimeout int32 + + // RebalanceTimeout holds the maximum time that the coordinator will wait + // for each member to rejoin when rebalancing the group in ms + RebalanceTimeout int32 + + // MemberID assigned by the group coordinator or the zero string if joining + // for the first time.
+ MemberID string + + // ProtocolType holds the unique name for the class of protocols implemented by the group + ProtocolType string + + // GroupProtocols holds the list of protocols that the member supports + GroupProtocols []joinGroupRequestGroupProtocolV1 +} + +func (t joinGroupRequestV1) size() int32 { + return sizeofString(t.GroupID) + + sizeofInt32(t.SessionTimeout) + + sizeofInt32(t.RebalanceTimeout) + + sizeofString(t.MemberID) + + sizeofString(t.ProtocolType) + + sizeofArray(len(t.GroupProtocols), func(i int) int32 { return t.GroupProtocols[i].size() }) +} + +func (t joinGroupRequestV1) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeInt32(t.SessionTimeout) + wb.writeInt32(t.RebalanceTimeout) + wb.writeString(t.MemberID) + wb.writeString(t.ProtocolType) + wb.writeArray(len(t.GroupProtocols), func(i int) { t.GroupProtocols[i].writeTo(wb) }) +} + +type joinGroupResponseMemberV1 struct { + // MemberID assigned by the group coordinator + MemberID string + MemberMetadata []byte +} + +func (t joinGroupResponseMemberV1) size() int32 { + return sizeofString(t.MemberID) + + sizeofBytes(t.MemberMetadata) +} + +func (t joinGroupResponseMemberV1) writeTo(wb *writeBuffer) { + wb.writeString(t.MemberID) + wb.writeBytes(t.MemberMetadata) +} + +func (t *joinGroupResponseMemberV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readString(r, size, &t.MemberID); err != nil { + return + } + if remain, err = readBytes(r, remain, &t.MemberMetadata); err != nil { + return + } + return +} + +type joinGroupResponseV1 struct { + // ErrorCode holds response error code + ErrorCode int16 + + // GenerationID holds the generation of the group. + GenerationID int32 + + // GroupProtocol holds the group protocol selected by the coordinator + GroupProtocol string + + // LeaderID holds the leader of the group + LeaderID string + + // MemberID assigned by the group coordinator + MemberID string + Members []joinGroupResponseMemberV1 +} + +func (t joinGroupResponseV1) size() int32 { + return sizeofInt16(t.ErrorCode) + + sizeofInt32(t.GenerationID) + + sizeofString(t.GroupProtocol) + + sizeofString(t.LeaderID) + + sizeofString(t.MemberID) + + sizeofArray(len(t.Members), func(i int) int32 { return t.Members[i].size() }) +} + +func (t joinGroupResponseV1) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) + wb.writeInt32(t.GenerationID) + wb.writeString(t.GroupProtocol) + wb.writeString(t.LeaderID) + wb.writeString(t.MemberID) + wb.writeArray(len(t.Members), func(i int) { t.Members[i].writeTo(wb) }) +} + +func (t *joinGroupResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readInt16(r, size, &t.ErrorCode); err != nil { + return + } + if remain, err = readInt32(r, remain, &t.GenerationID); err != nil { + return + } + if remain, err = readString(r, remain, &t.GroupProtocol); err != nil { + return + } + if remain, err = readString(r, remain, &t.LeaderID); err != nil { + return + } + if remain, err = readString(r, remain, &t.MemberID); err != nil { + return + } + + fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { + var item joinGroupResponseMemberV1 + if fnRemain, fnErr = (&item).readFrom(r, size); fnErr != nil { + return + } + t.Members = append(t.Members, item) + return + } + if remain, err = readArrayWith(r, remain, fn); err != nil { + return + } + + return +} diff --git a/vendor/github.com/segmentio/kafka-go/kafka.go b/vendor/github.com/segmentio/kafka-go/kafka.go new file mode 100644 index
00000000000..d2d36e413c9 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/kafka.go @@ -0,0 +1,100 @@ +package kafka + +import "github.com/segmentio/kafka-go/protocol" + +// Broker represents a kafka broker in a kafka cluster. +type Broker struct { + Host string + Port int + ID int + Rack string +} + +// Topic represents a topic in a kafka cluster. +type Topic struct { + // Name of the topic. + Name string + + // True if the topic is internal. + Internal bool + + // The list of partitions currently available on this topic. + Partitions []Partition + + // An error that may have occurred while attempting to read the topic + // metadata. + // + // The error contains both the kafka error code, and an error message + // returned by the kafka broker. Programs may use the standard errors.Is + // function to test the error against kafka error codes. + Error error +} + +// Partition carries the metadata associated with a kafka partition. +type Partition struct { + // Name of the topic that the partition belongs to, and its index in the + // topic. + Topic string + ID int + + // Leader, replicas, and ISR for the partition. + // + // When no physical host is known to be running a broker, the Host and Port + // fields will be set to the zero values. The logical broker ID is always + // set to the value known to the kafka cluster, even if the broker is not + // currently backed by a physical host. + Leader Broker + Replicas []Broker + Isr []Broker + + // Available only with metadata API level >= 6: + OfflineReplicas []Broker + + // An error that may have occurred while attempting to read the partition + // metadata. + // + // The error contains both the kafka error code, and an error message + // returned by the kafka broker. Programs may use the standard errors.Is + // function to test the error against kafka error codes. + Error error +} + +// Marshal encodes v into a binary representation of the value in the kafka data +// format. +// +// If v is, or contains, struct types, the kafka struct tags are interpreted +// and may contain one of these values: +// +// nullable valid on bytes and strings, encodes as a nullable value +// compact valid on strings, encodes as a compact string +// +// The kafka struct tags should not contain min and max versions. If you need to +// encode types based on specific versions of kafka APIs, use the Version type +// instead. +func Marshal(v interface{}) ([]byte, error) { + return protocol.Marshal(-1, v) +} + +// Unmarshal decodes a binary representation from b into v. +// +// See Marshal for details. +func Unmarshal(b []byte, v interface{}) error { + return protocol.Unmarshal(b, -1, v) +} + +// Version represents a version number for kafka APIs. +type Version int16 + +// Marshal is like the top-level Marshal function, but will only encode struct +// fields for which n falls within the min and max versions specified on the +// struct tag. +func (n Version) Marshal(v interface{}) ([]byte, error) { + return protocol.Marshal(int16(n), v) +} + +// Unmarshal is like the top-level Unmarshal function, but will only decode +// struct fields for which n falls within the min and max versions specified on +// the struct tag.
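+// +// A minimal illustrative sketch (myValue is a hypothetical struct whose fields carry kafka struct tags with min/max versions): +// +// v := Version(2) +// b, err := v.Marshal(myValue) // encodes only the fields valid at version 2 +// ... +// err = v.Unmarshal(b, &myValue) // decodes the same representation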
+func (n Version) Unmarshal(b []byte, v interface{}) error { + return protocol.Unmarshal(b, int16(n), v) +} diff --git a/vendor/github.com/segmentio/kafka-go/leavegroup.go b/vendor/github.com/segmentio/kafka-go/leavegroup.go new file mode 100644 index 00000000000..ad59a55c01b --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/leavegroup.go @@ -0,0 +1,147 @@ +package kafka + +import ( + "bufio" + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/leavegroup" +) + +// LeaveGroupRequest is the request structure for the LeaveGroup function. +type LeaveGroupRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // GroupID of the group to leave. + GroupID string + + // List of leaving member identities. + Members []LeaveGroupRequestMember +} + +// LeaveGroupRequestMember represents the identity of a member leaving a group. +type LeaveGroupRequestMember struct { + // The member ID to remove from the group. + ID string + + // The group instance ID to remove from the group. + GroupInstanceID string +} + +// LeaveGroupResponse is the response structure for the LeaveGroup function. +type LeaveGroupResponse struct { + // An error that may have occurred when attempting to leave the group. + // + // The errors contain the kafka error code. Programs may use the standard + // errors.Is function to test the error against kafka error codes. + Error error + + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // List of leaving member responses. + Members []LeaveGroupResponseMember +} + +// LeaveGroupResponseMember represents a member leaving the group. +type LeaveGroupResponseMember struct { + // The member ID of the member leaving the group. + ID string + + // The group instance ID to remove from the group. + GroupInstanceID string + + // An error that may have occurred when attempting to remove the member from the group. + // + // The errors contain the kafka error code. Programs may use the standard + // errors.Is function to test the error against kafka error codes. + Error error +} + +func (c *Client) LeaveGroup(ctx context.Context, req *LeaveGroupRequest) (*LeaveGroupResponse, error) { + leaveGroup := leavegroup.Request{ + GroupID: req.GroupID, + Members: make([]leavegroup.RequestMember, 0, len(req.Members)), + } + + for _, member := range req.Members { + leaveGroup.Members = append(leaveGroup.Members, leavegroup.RequestMember{ + MemberID: member.ID, + GroupInstanceID: member.GroupInstanceID, + }) + } + + m, err := c.roundTrip(ctx, req.Addr, &leaveGroup) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).LeaveGroup: %w", err) + } + + r := m.(*leavegroup.Response) + + res := &LeaveGroupResponse{ + Error: makeError(r.ErrorCode, ""), + Throttle: makeDuration(r.ThrottleTimeMS), + } + + if len(r.Members) == 0 { + // If we're using a version of the api without the + // members array in the response, just add a member + // so the api is consistent across versions.
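+ // (This mirrors the first member of the request and assumes the + // caller supplied at least one member.)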
+ r.Members = []leavegroup.ResponseMember{ + { + MemberID: req.Members[0].ID, + GroupInstanceID: req.Members[0].GroupInstanceID, + }, + } + } + + res.Members = make([]LeaveGroupResponseMember, 0, len(r.Members)) + for _, member := range r.Members { + res.Members = append(res.Members, LeaveGroupResponseMember{ + ID: member.MemberID, + GroupInstanceID: member.GroupInstanceID, + Error: makeError(member.ErrorCode, ""), + }) + } + + return res, nil +} + +type leaveGroupRequestV0 struct { + // GroupID holds the unique group identifier + GroupID string + + // MemberID assigned by the group coordinator or the zero string if joining + // for the first time. + MemberID string +} + +func (t leaveGroupRequestV0) size() int32 { + return sizeofString(t.GroupID) + sizeofString(t.MemberID) +} + +func (t leaveGroupRequestV0) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeString(t.MemberID) +} + +type leaveGroupResponseV0 struct { + // ErrorCode holds response error code + ErrorCode int16 +} + +func (t leaveGroupResponseV0) size() int32 { + return sizeofInt16(t.ErrorCode) +} + +func (t leaveGroupResponseV0) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) +} + +func (t *leaveGroupResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { + remain, err = readInt16(r, size, &t.ErrorCode) + return +} diff --git a/vendor/github.com/segmentio/kafka-go/listgroups.go b/vendor/github.com/segmentio/kafka-go/listgroups.go new file mode 100644 index 00000000000..229de9352d4 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/listgroups.go @@ -0,0 +1,139 @@ +package kafka + +import ( + "bufio" + "context" + "net" + + "github.com/segmentio/kafka-go/protocol/listgroups" +) + +// ListGroupsRequest is a request to the ListGroups API. +type ListGroupsRequest struct { + // Addr is the address of the kafka broker to send the request to. + Addr net.Addr +} + +// ListGroupsResponse is a response from the ListGroups API. +type ListGroupsResponse struct { + // Error is set to a non-nil value if a top-level error occurred while fetching + // groups. + Error error + + // Groups contains the list of groups. + Groups []ListGroupsResponseGroup +} + +// ListGroupsResponseGroup contains the response details for a single group. +type ListGroupsResponseGroup struct { + // GroupID is the ID of the group. + GroupID string + + // Coordinator is the ID of the coordinator broker for the group. + Coordinator int +} + +func (c *Client) ListGroups( + ctx context.Context, + req *ListGroupsRequest, +) (*ListGroupsResponse, error) { + protoResp, err := c.roundTrip(ctx, req.Addr, &listgroups.Request{}) + if err != nil { + return nil, err + } + apiResp := protoResp.(*listgroups.Response) + resp := &ListGroupsResponse{ + Error: makeError(apiResp.ErrorCode, ""), + } + + for _, apiGroupInfo := range apiResp.Groups { + resp.Groups = append(resp.Groups, ListGroupsResponseGroup{ + GroupID: apiGroupInfo.GroupID, + Coordinator: int(apiGroupInfo.BrokerID), + }) + } + + return resp, nil +} + +// TODO: Remove everything below and use protocol-based version above everywhere. 
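+ +// An illustrative sketch of the protocol-based API above (client, addr, ctx and the use of fmt are hypothetical, not part of this file): +// +// resp, err := client.ListGroups(ctx, &ListGroupsRequest{Addr: addr}) +// if err == nil { +// for _, g := range resp.Groups { +// fmt.Printf("group %q coordinator %d\n", g.GroupID, g.Coordinator) +// } +// } +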
+type listGroupsRequestV1 struct { +} + +func (t listGroupsRequestV1) size() int32 { + return 0 +} + +func (t listGroupsRequestV1) writeTo(wb *writeBuffer) { +} + +type listGroupsResponseGroupV1 struct { + // GroupID holds the unique group identifier + GroupID string + ProtocolType string +} + +func (t listGroupsResponseGroupV1) size() int32 { + return sizeofString(t.GroupID) + sizeofString(t.ProtocolType) +} + +func (t listGroupsResponseGroupV1) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeString(t.ProtocolType) +} + +func (t *listGroupsResponseGroupV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readString(r, size, &t.GroupID); err != nil { + return + } + if remain, err = readString(r, remain, &t.ProtocolType); err != nil { + return + } + return +} + +type listGroupsResponseV1 struct { + // ThrottleTimeMS holds the duration in milliseconds for which the request + // was throttled due to quota violation (Zero if the request did not violate + // any quota) + ThrottleTimeMS int32 + + // ErrorCode holds response error code + ErrorCode int16 + Groups []listGroupsResponseGroupV1 +} + +func (t listGroupsResponseV1) size() int32 { + return sizeofInt32(t.ThrottleTimeMS) + + sizeofInt16(t.ErrorCode) + + sizeofArray(len(t.Groups), func(i int) int32 { return t.Groups[i].size() }) +} + +func (t listGroupsResponseV1) writeTo(wb *writeBuffer) { + wb.writeInt32(t.ThrottleTimeMS) + wb.writeInt16(t.ErrorCode) + wb.writeArray(len(t.Groups), func(i int) { t.Groups[i].writeTo(wb) }) +} + +func (t *listGroupsResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readInt32(r, size, &t.ThrottleTimeMS); err != nil { + return + } + if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { + return + } + + fn := func(withReader *bufio.Reader, withSize int) (fnRemain int, fnErr error) { + var item listGroupsResponseGroupV1 + if fnRemain, fnErr = (&item).readFrom(withReader, withSize); fnErr != nil { + return + } + t.Groups = append(t.Groups, item) + return + } + if remain, err = readArrayWith(r, remain, fn); err != nil { + return + } + + return +} diff --git a/vendor/github.com/segmentio/kafka-go/listoffset.go b/vendor/github.com/segmentio/kafka-go/listoffset.go new file mode 100644 index 00000000000..11c5d04b4d5 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/listoffset.go @@ -0,0 +1,286 @@ +package kafka + +import ( + "bufio" + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/listoffsets" +) + +// OffsetRequest represents a request to retrieve a single partition offset. +type OffsetRequest struct { + Partition int + Timestamp int64 +} + +// FirstOffsetOf constructs an OffsetRequest which asks for the first offset of +// the partition given as argument. +func FirstOffsetOf(partition int) OffsetRequest { + return OffsetRequest{Partition: partition, Timestamp: FirstOffset} +} + +// LastOffsetOf constructs an OffsetRequest which asks for the last offset of +// the partition given as argument. +func LastOffsetOf(partition int) OffsetRequest { + return OffsetRequest{Partition: partition, Timestamp: LastOffset} +} + +// TimeOffsetOf constructs an OffsetRequest which asks for a partition offset +// at a given time. +func TimeOffsetOf(partition int, at time.Time) OffsetRequest { + return OffsetRequest{Partition: partition, Timestamp: timestamp(at)} +} + +// PartitionOffsets carries information about offsets available in a topic +// partition.
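+// +// For example (illustrative), a ListOffsetsRequest asking for both extremes of partition 0 of "my-topic" could populate its Topics field with: +// +// Topics: map[string][]OffsetRequest{ +// "my-topic": {FirstOffsetOf(0), LastOffsetOf(0)}, +// }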
+type PartitionOffsets struct { + Partition int + FirstOffset int64 + LastOffset int64 + Offsets map[int64]time.Time + Error error +} + +// ListOffsetsRequest represents a request sent to a kafka broker to list the +// offsets of topic partitions. +type ListOffsetsRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // A mapping of topic names to the list of partitions that the program wishes to + // get the offsets for. + Topics map[string][]OffsetRequest + + // The isolation level for the request. + // + // Defaults to ReadUncommitted. + // + // This field requires the kafka broker to support the ListOffsets API in + // version 2 or above (otherwise the value is ignored). + IsolationLevel IsolationLevel +} + +// ListOffsetsResponse represents a response from a kafka broker to an offset +// listing request. +type ListOffsetsResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // Mappings of topic names to partition offsets; there will be one entry + // for each topic in the request. + Topics map[string][]PartitionOffsets +} + +// ListOffsets sends an offset request to a kafka broker and returns the +// response. +func (c *Client) ListOffsets(ctx context.Context, req *ListOffsetsRequest) (*ListOffsetsResponse, error) { + type topicPartition struct { + topic string + partition int + } + + partitionOffsets := make(map[topicPartition]PartitionOffsets) + + for topicName, requests := range req.Topics { + for _, r := range requests { + key := topicPartition{ + topic: topicName, + partition: r.Partition, + } + + partition, ok := partitionOffsets[key] + if !ok { + partition = PartitionOffsets{ + Partition: r.Partition, + FirstOffset: -1, + LastOffset: -1, + Offsets: make(map[int64]time.Time), + } + } + + switch r.Timestamp { + case FirstOffset: + partition.FirstOffset = 0 + case LastOffset: + partition.LastOffset = 0 + } + + partitionOffsets[topicPartition{ + topic: topicName, + partition: r.Partition, + }] = partition + } + } + + topics := make([]listoffsets.RequestTopic, 0, len(req.Topics)) + + for topicName, requests := range req.Topics { + partitions := make([]listoffsets.RequestPartition, len(requests)) + + for i, r := range requests { + partitions[i] = listoffsets.RequestPartition{ + Partition: int32(r.Partition), + CurrentLeaderEpoch: -1, + Timestamp: r.Timestamp, + } + } + + topics = append(topics, listoffsets.RequestTopic{ + Topic: topicName, + Partitions: partitions, + }) + } + + m, err := c.roundTrip(ctx, req.Addr, &listoffsets.Request{ + ReplicaID: -1, + IsolationLevel: int8(req.IsolationLevel), + Topics: topics, + }) + + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).ListOffsets: %w", err) + } + + res := m.(*listoffsets.Response) + ret := &ListOffsetsResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Topics: make(map[string][]PartitionOffsets, len(res.Topics)), + } + + for _, t := range res.Topics { + for _, p := range t.Partitions { + key := topicPartition{ + topic: t.Topic, + partition: int(p.Partition), + } + + partition := partitionOffsets[key] + + switch p.Timestamp { + case FirstOffset: + partition.FirstOffset = p.Offset + case LastOffset: + partition.LastOffset = p.Offset + default: + partition.Offsets[p.Offset] = makeTime(p.Timestamp) + } + + if p.ErrorCode != 0 { + partition.Error = Error(p.ErrorCode) + } + + partitionOffsets[key] = partition + } + } + + for key, partition := range partitionOffsets { + ret.Topics[key.topic] = append(ret.Topics[key.topic],
partition) + } + + return ret, nil +} + +type listOffsetRequestV1 struct { + ReplicaID int32 + Topics []listOffsetRequestTopicV1 +} + +func (r listOffsetRequestV1) size() int32 { + return 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) +} + +func (r listOffsetRequestV1) writeTo(wb *writeBuffer) { + wb.writeInt32(r.ReplicaID) + wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) }) +} + +type listOffsetRequestTopicV1 struct { + TopicName string + Partitions []listOffsetRequestPartitionV1 +} + +func (t listOffsetRequestTopicV1) size() int32 { + return sizeofString(t.TopicName) + + sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) +} + +func (t listOffsetRequestTopicV1) writeTo(wb *writeBuffer) { + wb.writeString(t.TopicName) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) +} + +type listOffsetRequestPartitionV1 struct { + Partition int32 + Time int64 +} + +func (p listOffsetRequestPartitionV1) size() int32 { + return 4 + 8 +} + +func (p listOffsetRequestPartitionV1) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt64(p.Time) +} + +type listOffsetResponseV1 []listOffsetResponseTopicV1 + +func (r listOffsetResponseV1) size() int32 { + return sizeofArray(len(r), func(i int) int32 { return r[i].size() }) +} + +func (r listOffsetResponseV1) writeTo(wb *writeBuffer) { + wb.writeArray(len(r), func(i int) { r[i].writeTo(wb) }) +} + +type listOffsetResponseTopicV1 struct { + TopicName string + PartitionOffsets []partitionOffsetV1 +} + +func (t listOffsetResponseTopicV1) size() int32 { + return sizeofString(t.TopicName) + + sizeofArray(len(t.PartitionOffsets), func(i int) int32 { return t.PartitionOffsets[i].size() }) +} + +func (t listOffsetResponseTopicV1) writeTo(wb *writeBuffer) { + wb.writeString(t.TopicName) + wb.writeArray(len(t.PartitionOffsets), func(i int) { t.PartitionOffsets[i].writeTo(wb) }) +} + +type partitionOffsetV1 struct { + Partition int32 + ErrorCode int16 + Timestamp int64 + Offset int64 +} + +func (p partitionOffsetV1) size() int32 { + return 4 + 2 + 8 + 8 +} + +func (p partitionOffsetV1) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt16(p.ErrorCode) + wb.writeInt64(p.Timestamp) + wb.writeInt64(p.Offset) +} + +func (p *partitionOffsetV1) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + if remain, err = readInt32(r, sz, &p.Partition); err != nil { + return + } + if remain, err = readInt16(r, remain, &p.ErrorCode); err != nil { + return + } + if remain, err = readInt64(r, remain, &p.Timestamp); err != nil { + return + } + if remain, err = readInt64(r, remain, &p.Offset); err != nil { + return + } + return +} diff --git a/vendor/github.com/segmentio/kafka-go/logger.go b/vendor/github.com/segmentio/kafka-go/logger.go new file mode 100644 index 00000000000..d359ab789d7 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/logger.go @@ -0,0 +1,17 @@ +package kafka + +// Logger interface API for log.Logger. +type Logger interface { + Printf(string, ...interface{}) +} + +// LoggerFunc is a bridge between Logger and any third party logger +// Usage: +// l := NewLogger() // some logger +// r := kafka.NewReader(kafka.ReaderConfig{ +// Logger: kafka.LoggerFunc(l.Infof), +// ErrorLogger: kafka.LoggerFunc(l.Errorf), +// }) +type LoggerFunc func(string, ...interface{}) + +func (f LoggerFunc) Printf(msg string, args ...interface{}) { f(msg, args...) 
} diff --git a/vendor/github.com/segmentio/kafka-go/message.go b/vendor/github.com/segmentio/kafka-go/message.go new file mode 100644 index 00000000000..0539e603834 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/message.go @@ -0,0 +1,132 @@ +package kafka + +import ( + "time" +) + +// Message is a data structure representing kafka messages. +type Message struct { + // Topic indicates which topic this message was consumed from via Reader. + // + // When being used with Writer, this can be used to configure the topic if + // not already specified on the writer itself. + Topic string + + // Partition is read-only and MUST NOT be set when writing messages + Partition int + Offset int64 + HighWaterMark int64 + Key []byte + Value []byte + Headers []Header + + // This field is used to hold arbitrary data you wish to include, so that it + // is available when handling it in the Writer's `Completion` method; this + // lets the application run any post-processing on each message. + WriterData interface{} + + // If not set at creation, Time will be automatically set when + // writing the message. + Time time.Time +} + +func (msg Message) message(cw *crc32Writer) message { + m := message{ + MagicByte: 1, + Key: msg.Key, + Value: msg.Value, + Timestamp: timestamp(msg.Time), + } + if cw != nil { + m.CRC = m.crc32(cw) + } + return m +} + +const timestampSize = 8 + +func (msg *Message) size() int32 { + return 4 + 1 + 1 + sizeofBytes(msg.Key) + sizeofBytes(msg.Value) + timestampSize +} + +func (msg *Message) headerSize() int { + return varArrayLen(len(msg.Headers), func(i int) int { + h := &msg.Headers[i] + return varStringLen(h.Key) + varBytesLen(h.Value) + }) +} + +func (msg *Message) totalSize() int32 { + return int32(msg.headerSize()) + msg.size() +} + +type message struct { + CRC int32 + MagicByte int8 + Attributes int8 + Timestamp int64 + Key []byte + Value []byte +} + +func (m message) crc32(cw *crc32Writer) int32 { + cw.crc32 = 0 + cw.writeInt8(m.MagicByte) + cw.writeInt8(m.Attributes) + if m.MagicByte != 0 { + cw.writeInt64(m.Timestamp) + } + cw.writeBytes(m.Key) + cw.writeBytes(m.Value) + return int32(cw.crc32) +} + +func (m message) size() int32 { + size := 4 + 1 + 1 + sizeofBytes(m.Key) + sizeofBytes(m.Value) + if m.MagicByte != 0 { + size += timestampSize + } + return size +} + +func (m message) writeTo(wb *writeBuffer) { + wb.writeInt32(m.CRC) + wb.writeInt8(m.MagicByte) + wb.writeInt8(m.Attributes) + if m.MagicByte != 0 { + wb.writeInt64(m.Timestamp) + } + wb.writeBytes(m.Key) + wb.writeBytes(m.Value) +} + +type messageSetItem struct { + Offset int64 + MessageSize int32 + Message message +} + +func (m messageSetItem) size() int32 { + return 8 + 4 + m.Message.size() +} + +func (m messageSetItem) writeTo(wb *writeBuffer) { + wb.writeInt64(m.Offset) + wb.writeInt32(m.MessageSize) + m.Message.writeTo(wb) +} + +type messageSet []messageSetItem + +func (s messageSet) size() (size int32) { + for _, m := range s { + size += m.size() + } + return +} + +func (s messageSet) writeTo(wb *writeBuffer) { + for _, m := range s { + m.writeTo(wb) + } +} diff --git a/vendor/github.com/segmentio/kafka-go/message_reader.go b/vendor/github.com/segmentio/kafka-go/message_reader.go new file mode 100644 index 00000000000..a0a0385ef51 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/message_reader.go @@ -0,0 +1,555 @@ +package kafka + +import ( + "bufio" + "bytes" + "fmt" + "io" + "log" +) + +type readBytesFunc func(*bufio.Reader, int,
int) (int, error) + +// messageSetReader processes the messages encoded into a fetch response. +// The response may contain a mix of Record Batches (newer format) and Messages +// (older format). +type messageSetReader struct { + *readerStack // used for decompressing compressed messages and record batches + empty bool // if true, short circuits messageSetReader methods + debug bool // enable debug log messages + // How many bytes are expected to remain in the response. + // + // This is used to detect truncation of the response. + lengthRemain int + + decompressed *bytes.Buffer +} + +type readerStack struct { + reader *bufio.Reader + remain int + base int64 + parent *readerStack + count int // how many messages left in the current message set + header messagesHeader // the current header for a subset of messages within the set. +} + +// messagesHeader describes a set of records. there may be many messagesHeader's in a message set. +type messagesHeader struct { + firstOffset int64 + length int32 + crc int32 + magic int8 + // v1 composes attributes specific to v0 and v1 message headers + v1 struct { + attributes int8 + timestamp int64 + } + // v2 composes attributes specific to v2 message headers + v2 struct { + leaderEpoch int32 + attributes int16 + lastOffsetDelta int32 + firstTimestamp int64 + lastTimestamp int64 + producerID int64 + producerEpoch int16 + baseSequence int32 + count int32 + } +} + +func (h messagesHeader) compression() (codec CompressionCodec, err error) { + const compressionCodecMask = 0x07 + var code int8 + switch h.magic { + case 0, 1: + code = h.v1.attributes & compressionCodecMask + case 2: + code = int8(h.v2.attributes & compressionCodecMask) + default: + err = h.badMagic() + return + } + if code != 0 { + codec, err = resolveCodec(code) + } + return +} + +func (h messagesHeader) badMagic() error { + return fmt.Errorf("unsupported magic byte %d in header", h.magic) +} + +func newMessageSetReader(reader *bufio.Reader, remain int) (*messageSetReader, error) { + res := &messageSetReader{ + readerStack: &readerStack{ + reader: reader, + remain: remain, + }, + decompressed: acquireBuffer(), + } + err := res.readHeader() + return res, err +} + +func (r *messageSetReader) remaining() (remain int) { + if r.empty { + return 0 + } + for s := r.readerStack; s != nil; s = s.parent { + remain += s.remain + } + return +} + +func (r *messageSetReader) discard() (err error) { + switch { + case r.empty: + case r.readerStack == nil: + default: + // rewind up to the top-most reader b/c it's the only one that's doing + // actual i/o. the rest are byte buffers that have been pushed on the stack + // while reading compressed message sets. 
+ for r.parent != nil { + r.readerStack = r.parent + } + err = r.discardN(r.remain) + } + return +} + +func (r *messageSetReader) readMessage(min int64, key readBytesFunc, val readBytesFunc) ( + offset int64, lastOffset int64, timestamp int64, headers []Header, err error) { + + if r.empty { + err = RequestTimedOut + return + } + if err = r.readHeader(); err != nil { + return + } + switch r.header.magic { + case 0, 1: + offset, timestamp, headers, err = r.readMessageV1(min, key, val) + // Set an invalid value so that it can be ignored + lastOffset = -1 + case 2: + offset, lastOffset, timestamp, headers, err = r.readMessageV2(min, key, val) + default: + err = r.header.badMagic() + } + return +} + +func (r *messageSetReader) readMessageV1(min int64, key readBytesFunc, val readBytesFunc) ( + offset int64, timestamp int64, headers []Header, err error) { + + for r.readerStack != nil { + if r.remain == 0 { + r.readerStack = r.parent + continue + } + if err = r.readHeader(); err != nil { + return + } + offset = r.header.firstOffset + timestamp = r.header.v1.timestamp + var codec CompressionCodec + if codec, err = r.header.compression(); err != nil { + return + } + if r.debug { + r.log("Reading with codec=%T", codec) + } + if codec != nil { + // discard next four bytes...will be -1 to indicate null key + if err = r.discardN(4); err != nil { + return + } + + // read and decompress the contained message set. + r.decompressed.Reset() + if err = r.readBytesWith(func(br *bufio.Reader, sz int, n int) (remain int, err error) { + // x4 as a guess that the average compression ratio is near 75% + r.decompressed.Grow(4 * n) + limitReader := io.LimitedReader{R: br, N: int64(n)} + codecReader := codec.NewReader(&limitReader) + _, err = r.decompressed.ReadFrom(codecReader) + remain = sz - (n - int(limitReader.N)) + codecReader.Close() + return + }); err != nil { + return + } + + // the compressed message's offset will be equal to the offset of + // the last message in the set. within the compressed set, the + // offsets will be relative, so we have to scan through them to + // get the base offset. for example, if there are four compressed + // messages at offsets 10-13, then the container message will have + // offset 13 and the contained messages will be 0,1,2,3. the base + // offset for the container, then is 13-3=10. + if offset, err = extractOffset(offset, r.decompressed.Bytes()); err != nil { + return + } + + // mark the outer message as being read + r.markRead() + + // then push the decompressed bytes onto the stack. + r.readerStack = &readerStack{ + // Allocate a buffer of size 0, which gets capped at 16 bytes + // by the bufio package. We are already reading buffered data + // here, no need to reserve another 4KB buffer. + reader: bufio.NewReaderSize(r.decompressed, 0), + remain: r.decompressed.Len(), + base: offset, + parent: r.readerStack, + } + continue + } + + // adjust the offset in case we're reading compressed messages. the + // base will be zero otherwise. + offset += r.base + + // When the messages are compressed kafka may return messages at an + // earlier offset than the one that was requested, it's the client's + // responsibility to ignore those. + // + // At this point, the message header has been read, so discarding + // the rest of the message means we have to discard the key, and then + // the value. Each of those are preceded by a 4-byte length. Discarding + // them is then reading that length variable and then discarding that + // amount. 
+ if offset < min { + // discard the key + if err = r.discardBytes(); err != nil { + return + } + // discard the value + if err = r.discardBytes(); err != nil { + return + } + // since we have fully consumed the message, mark as read + r.markRead() + continue + } + if err = r.readBytesWith(key); err != nil { + return + } + if err = r.readBytesWith(val); err != nil { + return + } + r.markRead() + return + } + err = errShortRead + return +} + +func (r *messageSetReader) readMessageV2(_ int64, key readBytesFunc, val readBytesFunc) ( + offset int64, lastOffset int64, timestamp int64, headers []Header, err error) { + if err = r.readHeader(); err != nil { + return + } + if r.count == int(r.header.v2.count) { // first time reading this set, so check for compression headers. + var codec CompressionCodec + if codec, err = r.header.compression(); err != nil { + return + } + if codec != nil { + batchRemain := int(r.header.length - 49) // TODO: document this magic number + if batchRemain > r.remain { + err = errShortRead + return + } + if batchRemain < 0 { + err = fmt.Errorf("batch remain < 0 (%d)", batchRemain) + return + } + r.decompressed.Reset() + // x4 as a guess that the average compression ratio is near 75% + r.decompressed.Grow(4 * batchRemain) + limitReader := io.LimitedReader{R: r.reader, N: int64(batchRemain)} + codecReader := codec.NewReader(&limitReader) + _, err = r.decompressed.ReadFrom(codecReader) + codecReader.Close() + if err != nil { + return + } + r.remain -= batchRemain - int(limitReader.N) + r.readerStack = &readerStack{ + reader: bufio.NewReaderSize(r.decompressed, 0), // the new stack reads from the decompressed buffer + remain: r.decompressed.Len(), + base: -1, // base is unused here + parent: r.readerStack, + header: r.header, + count: r.count, + } + // all of the messages in this set are in the decompressed set just pushed onto the reader + // stack. 
here we set the parent count to 0 so that when the child set is exhausted, the + // reader will then try to read the header of the next message set + r.readerStack.parent.count = 0 + } + } + remainBefore := r.remain + var length int64 + if err = r.readVarInt(&length); err != nil { + return + } + lengthOfLength := remainBefore - r.remain + var attrs int8 + if err = r.readInt8(&attrs); err != nil { + return + } + var timestampDelta int64 + if err = r.readVarInt(×tampDelta); err != nil { + return + } + timestamp = r.header.v2.firstTimestamp + timestampDelta + var offsetDelta int64 + if err = r.readVarInt(&offsetDelta); err != nil { + return + } + offset = r.header.firstOffset + offsetDelta + if err = r.runFunc(key); err != nil { + return + } + if err = r.runFunc(val); err != nil { + return + } + var headerCount int64 + if err = r.readVarInt(&headerCount); err != nil { + return + } + if headerCount > 0 { + headers = make([]Header, headerCount) + for i := range headers { + if err = r.readMessageHeader(&headers[i]); err != nil { + return + } + } + } + lastOffset = r.header.firstOffset + int64(r.header.v2.lastOffsetDelta) + r.lengthRemain -= int(length) + lengthOfLength + r.markRead() + return +} + +func (r *messageSetReader) discardBytes() (err error) { + r.remain, err = discardBytes(r.reader, r.remain) + return +} + +func (r *messageSetReader) discardN(sz int) (err error) { + r.remain, err = discardN(r.reader, r.remain, sz) + return +} + +func (r *messageSetReader) markRead() { + if r.count == 0 { + panic("markRead: negative count") + } + r.count-- + r.unwindStack() + if r.debug { + r.log("Mark read remain=%d", r.remain) + } +} + +func (r *messageSetReader) unwindStack() { + for r.count == 0 { + if r.remain == 0 { + if r.parent != nil { + if r.debug { + r.log("Popped reader stack") + } + r.readerStack = r.parent + continue + } + } + break + } +} + +func (r *messageSetReader) readMessageHeader(header *Header) (err error) { + var keyLen int64 + if err = r.readVarInt(&keyLen); err != nil { + return + } + if header.Key, err = r.readNewString(int(keyLen)); err != nil { + return + } + var valLen int64 + if err = r.readVarInt(&valLen); err != nil { + return + } + if header.Value, err = r.readNewBytes(int(valLen)); err != nil { + return + } + return nil +} + +func (r *messageSetReader) runFunc(rbFunc readBytesFunc) (err error) { + var length int64 + if err = r.readVarInt(&length); err != nil { + return + } + if r.remain, err = rbFunc(r.reader, r.remain, int(length)); err != nil { + return + } + return +} + +func (r *messageSetReader) readHeader() (err error) { + if r.count > 0 { + // currently reading a set of messages, no need to read a header until they are exhausted. + return + } + r.header = messagesHeader{} + if err = r.readInt64(&r.header.firstOffset); err != nil { + return + } + if err = r.readInt32(&r.header.length); err != nil { + return + } + var crcOrLeaderEpoch int32 + if err = r.readInt32(&crcOrLeaderEpoch); err != nil { + return + } + if err = r.readInt8(&r.header.magic); err != nil { + return + } + switch r.header.magic { + case 0: + r.header.crc = crcOrLeaderEpoch + if err = r.readInt8(&r.header.v1.attributes); err != nil { + return + } + r.count = 1 + // Set arbitrary non-zero length so that we always assume the + // message is truncated since bytes remain. 
+ r.lengthRemain = 1 + if r.debug { + r.log("Read v0 header with offset=%d len=%d magic=%d attributes=%d", r.header.firstOffset, r.header.length, r.header.magic, r.header.v1.attributes) + } + case 1: + r.header.crc = crcOrLeaderEpoch + if err = r.readInt8(&r.header.v1.attributes); err != nil { + return + } + if err = r.readInt64(&r.header.v1.timestamp); err != nil { + return + } + r.count = 1 + // Set arbitrary non-zero length so that we always assume the + // message is truncated since bytes remain. + r.lengthRemain = 1 + if r.debug { + r.log("Read v1 header with remain=%d offset=%d magic=%d and attributes=%d", r.remain, r.header.firstOffset, r.header.magic, r.header.v1.attributes) + } + case 2: + r.header.v2.leaderEpoch = crcOrLeaderEpoch + if err = r.readInt32(&r.header.crc); err != nil { + return + } + if err = r.readInt16(&r.header.v2.attributes); err != nil { + return + } + if err = r.readInt32(&r.header.v2.lastOffsetDelta); err != nil { + return + } + if err = r.readInt64(&r.header.v2.firstTimestamp); err != nil { + return + } + if err = r.readInt64(&r.header.v2.lastTimestamp); err != nil { + return + } + if err = r.readInt64(&r.header.v2.producerID); err != nil { + return + } + if err = r.readInt16(&r.header.v2.producerEpoch); err != nil { + return + } + if err = r.readInt32(&r.header.v2.baseSequence); err != nil { + return + } + if err = r.readInt32(&r.header.v2.count); err != nil { + return + } + r.count = int(r.header.v2.count) + // Subtracts the header bytes from the length + r.lengthRemain = int(r.header.length) - 49 + if r.debug { + r.log("Read v2 header with count=%d offset=%d len=%d magic=%d attributes=%d", r.count, r.header.firstOffset, r.header.length, r.header.magic, r.header.v2.attributes) + } + default: + err = r.header.badMagic() + return + } + return +} + +func (r *messageSetReader) readNewBytes(len int) (res []byte, err error) { + res, r.remain, err = readNewBytes(r.reader, r.remain, len) + return +} + +func (r *messageSetReader) readNewString(len int) (res string, err error) { + res, r.remain, err = readNewString(r.reader, r.remain, len) + return +} + +func (r *messageSetReader) readInt8(val *int8) (err error) { + r.remain, err = readInt8(r.reader, r.remain, val) + return +} + +func (r *messageSetReader) readInt16(val *int16) (err error) { + r.remain, err = readInt16(r.reader, r.remain, val) + return +} + +func (r *messageSetReader) readInt32(val *int32) (err error) { + r.remain, err = readInt32(r.reader, r.remain, val) + return +} + +func (r *messageSetReader) readInt64(val *int64) (err error) { + r.remain, err = readInt64(r.reader, r.remain, val) + return +} + +func (r *messageSetReader) readVarInt(val *int64) (err error) { + r.remain, err = readVarInt(r.reader, r.remain, val) + return +} + +func (r *messageSetReader) readBytesWith(fn readBytesFunc) (err error) { + r.remain, err = readBytesWith(r.reader, r.remain, fn) + return +} + +func (r *messageSetReader) log(msg string, args ...interface{}) { + log.Printf("[DEBUG] "+msg, args...) 
+} + +func extractOffset(base int64, msgSet []byte) (offset int64, err error) { + r, remain := bufio.NewReader(bytes.NewReader(msgSet)), len(msgSet) + for remain > 0 { + if remain, err = readInt64(r, remain, &offset); err != nil { + return + } + var sz int32 + if remain, err = readInt32(r, remain, &sz); err != nil { + return + } + if remain, err = discardN(r, remain, int(sz)); err != nil { + return + } + } + offset = base - offset + return +} diff --git a/vendor/github.com/segmentio/kafka-go/metadata.go b/vendor/github.com/segmentio/kafka-go/metadata.go new file mode 100644 index 00000000000..429a6a260b0 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/metadata.go @@ -0,0 +1,287 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" + + metadataAPI "github.com/segmentio/kafka-go/protocol/metadata" +) + +// MetadataRequest represents a request sent to a kafka broker to retrieve its +// cluster metadata. +type MetadataRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // The list of topics to retrieve metadata for. + Topics []string +} + +// MetadataResponse represents a response from a kafka broker to a metadata +// request. +type MetadataResponse struct { + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // Name of the kafka cluster that the client retrieved metadata from. + ClusterID string + + // The broker which is currently the controller for the cluster. + Controller Broker + + // The list of brokers registered to the cluster. + Brokers []Broker + + // The list of topics available on the cluster. + Topics []Topic +} + +// Metadata sends a metadata request to a kafka broker and returns the response. +func (c *Client) Metadata(ctx context.Context, req *MetadataRequest) (*MetadataResponse, error) { + m, err := c.roundTrip(ctx, req.Addr, &metadataAPI.Request{ + TopicNames: req.Topics, + }) + + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).Metadata: %w", err) + } + + res := m.(*metadataAPI.Response) + ret := &MetadataResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Brokers: make([]Broker, len(res.Brokers)), + Topics: make([]Topic, len(res.Topics)), + ClusterID: res.ClusterID, + } + + brokers := make(map[int32]Broker, len(res.Brokers)) + + for i, b := range res.Brokers { + broker := Broker{ + Host: b.Host, + Port: int(b.Port), + ID: int(b.NodeID), + Rack: b.Rack, + } + + ret.Brokers[i] = broker + brokers[b.NodeID] = broker + + if b.NodeID == res.ControllerID { + ret.Controller = broker + } + } + + for i, t := range res.Topics { + ret.Topics[i] = Topic{ + Name: t.Name, + Internal: t.IsInternal, + Partitions: make([]Partition, len(t.Partitions)), + Error: makeError(t.ErrorCode, ""), + } + + for j, p := range t.Partitions { + partition := Partition{ + Topic: t.Name, + ID: int(p.PartitionIndex), + Leader: brokers[p.LeaderID], + Replicas: make([]Broker, len(p.ReplicaNodes)), + Isr: make([]Broker, len(p.IsrNodes)), + Error: makeError(p.ErrorCode, ""), + } + + for i, id := range p.ReplicaNodes { + partition.Replicas[i] = brokers[id] + } + + for i, id := range p.IsrNodes { + partition.Isr[i] = brokers[id] + } + + ret.Topics[i].Partitions[j] = partition + } + } + + return ret, nil +} + +type topicMetadataRequestV1 []string + +func (r topicMetadataRequestV1) size() int32 { + return sizeofStringArray([]string(r)) +} + +func (r topicMetadataRequestV1) writeTo(wb *writeBuffer) { + // communicate nil-ness to the broker by passing -1 as the array
length. + // for this particular request, the broker interprets a zero length array + // as a request for no topics whereas a nil array is for all topics. + if r == nil { + wb.writeArrayLen(-1) + } else { + wb.writeStringArray([]string(r)) + } +} + +type metadataResponseV1 struct { + Brokers []brokerMetadataV1 + ControllerID int32 + Topics []topicMetadataV1 +} + +func (r metadataResponseV1) size() int32 { + n1 := sizeofArray(len(r.Brokers), func(i int) int32 { return r.Brokers[i].size() }) + n2 := sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) + return 4 + n1 + n2 +} + +func (r metadataResponseV1) writeTo(wb *writeBuffer) { + wb.writeArray(len(r.Brokers), func(i int) { r.Brokers[i].writeTo(wb) }) + wb.writeInt32(r.ControllerID) + wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) }) +} + +type brokerMetadataV1 struct { + NodeID int32 + Host string + Port int32 + Rack string +} + +func (b brokerMetadataV1) size() int32 { + return 4 + 4 + sizeofString(b.Host) + sizeofString(b.Rack) +} + +func (b brokerMetadataV1) writeTo(wb *writeBuffer) { + wb.writeInt32(b.NodeID) + wb.writeString(b.Host) + wb.writeInt32(b.Port) + wb.writeString(b.Rack) +} + +type topicMetadataV1 struct { + TopicErrorCode int16 + TopicName string + Internal bool + Partitions []partitionMetadataV1 +} + +func (t topicMetadataV1) size() int32 { + return 2 + 1 + + sizeofString(t.TopicName) + + sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) +} + +func (t topicMetadataV1) writeTo(wb *writeBuffer) { + wb.writeInt16(t.TopicErrorCode) + wb.writeString(t.TopicName) + wb.writeBool(t.Internal) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) +} + +type partitionMetadataV1 struct { + PartitionErrorCode int16 + PartitionID int32 + Leader int32 + Replicas []int32 + Isr []int32 +} + +func (p partitionMetadataV1) size() int32 { + return 2 + 4 + 4 + sizeofInt32Array(p.Replicas) + sizeofInt32Array(p.Isr) +} + +func (p partitionMetadataV1) writeTo(wb *writeBuffer) { + wb.writeInt16(p.PartitionErrorCode) + wb.writeInt32(p.PartitionID) + wb.writeInt32(p.Leader) + wb.writeInt32Array(p.Replicas) + wb.writeInt32Array(p.Isr) +} + +type topicMetadataRequestV6 struct { + Topics []string + AllowAutoTopicCreation bool +} + +func (r topicMetadataRequestV6) size() int32 { + return sizeofStringArray([]string(r.Topics)) + 1 +} + +func (r topicMetadataRequestV6) writeTo(wb *writeBuffer) { + // communicate nil-ness to the broker by passing -1 as the array length. + // for this particular request, the broker interprets a zero length array + // as a request for no topics whereas a nil array is for all topics.
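+ // For example: a nil Topics slice encodes array length -1 (all topics), + // while an empty, non-nil slice encodes length 0 (no topics).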
+ if r.Topics == nil {
+ wb.writeArrayLen(-1)
+ } else {
+ wb.writeStringArray([]string(r.Topics))
+ }
+ wb.writeBool(r.AllowAutoTopicCreation)
+}
+
+type metadataResponseV6 struct {
+ ThrottleTimeMs int32
+ Brokers []brokerMetadataV1
+ ClusterId string
+ ControllerID int32
+ Topics []topicMetadataV6
+}
+
+func (r metadataResponseV6) size() int32 {
+ n1 := sizeofArray(len(r.Brokers), func(i int) int32 { return r.Brokers[i].size() })
+ n2 := sizeofNullableString(&r.ClusterId)
+ n3 := sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() })
+ return 4 + 4 + n1 + n2 + n3
+}
+
+func (r metadataResponseV6) writeTo(wb *writeBuffer) {
+ wb.writeInt32(r.ThrottleTimeMs)
+ wb.writeArray(len(r.Brokers), func(i int) { r.Brokers[i].writeTo(wb) })
+ wb.writeString(r.ClusterId)
+ wb.writeInt32(r.ControllerID)
+ wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) })
+}
+
+type topicMetadataV6 struct {
+ TopicErrorCode int16
+ TopicName string
+ Internal bool
+ Partitions []partitionMetadataV6
+}
+
+func (t topicMetadataV6) size() int32 {
+ return 2 + 1 +
+ sizeofString(t.TopicName) +
+ sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() })
+}
+
+func (t topicMetadataV6) writeTo(wb *writeBuffer) {
+ wb.writeInt16(t.TopicErrorCode)
+ wb.writeString(t.TopicName)
+ wb.writeBool(t.Internal)
+ wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) })
+}
+
+type partitionMetadataV6 struct {
+ PartitionErrorCode int16
+ PartitionID int32
+ Leader int32
+ Replicas []int32
+ Isr []int32
+ OfflineReplicas []int32
+}
+
+func (p partitionMetadataV6) size() int32 {
+ return 2 + 4 + 4 + sizeofInt32Array(p.Replicas) + sizeofInt32Array(p.Isr) + sizeofInt32Array(p.OfflineReplicas)
+}
+
+func (p partitionMetadataV6) writeTo(wb *writeBuffer) {
+ wb.writeInt16(p.PartitionErrorCode)
+ wb.writeInt32(p.PartitionID)
+ wb.writeInt32(p.Leader)
+ wb.writeInt32Array(p.Replicas)
+ wb.writeInt32Array(p.Isr)
+ wb.writeInt32Array(p.OfflineReplicas)
+}
diff --git a/vendor/github.com/segmentio/kafka-go/offsetcommit.go b/vendor/github.com/segmentio/kafka-go/offsetcommit.go
new file mode 100644
index 00000000000..260fe4bc9b0
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/offsetcommit.go
@@ -0,0 +1,302 @@
+package kafka
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol/offsetcommit"
+)
+
+// OffsetCommit represents the commit of an offset to a partition.
+//
+// The extra metadata is opaque to the kafka protocol; it is intended to hold
+// information like an identifier for the process that committed the offset,
+// or the time at which the commit was made.
+type OffsetCommit struct {
+ Partition int
+ Offset int64
+ Metadata string
+}
+
+// OffsetCommitRequest represents a request sent to a kafka broker to commit
+// offsets for a partition.
+type OffsetCommitRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // ID of the consumer group to publish the offsets for.
+ GroupID string
+
+ // ID of the consumer group generation.
+ GenerationID int
+
+ // ID of the group member submitting the offsets.
+ MemberID string
+
+ // ID of the group instance.
+ InstanceID string
+
+ // Set of topic partitions to publish the offsets for.
+ //
+ // Note that offset commits need to be submitted to the broker acting as the
+ // group coordinator. This will be automatically resolved by the transport.
+ Topics map[string][]OffsetCommit
+}
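+
+// A minimal usage sketch (editorial illustration, not part of the upstream
+// source; the group, member, and topic values are assumptions):
+//
+//    res, err := client.OffsetCommit(ctx, &kafka.OffsetCommitRequest{
+//        GroupID:      "my-group",
+//        GenerationID: generationID,
+//        MemberID:     memberID,
+//        Topics: map[string][]kafka.OffsetCommit{
+//            "my-topic": {{Partition: 0, Offset: 42}},
+//        },
+//    })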
+
+// OffsetCommitResponse represents a response from a kafka broker to an offset
+// commit request.
+type OffsetCommitResponse struct {
+ // The amount of time that the broker throttled the request.
+ Throttle time.Duration
+
+ // Set of topic partitions that the kafka broker has accepted offset commits
+ // for.
+ Topics map[string][]OffsetCommitPartition
+}
+
+// OffsetCommitPartition represents the state of a single partition in responses
+// to committing offsets.
+type OffsetCommitPartition struct {
+ // ID of the partition.
+ Partition int
+
+ // An error that may have occurred while attempting to publish consumer
+ // group offsets for this partition.
+ //
+ // The error contains both the kafka error code, and an error message
+ // returned by the kafka broker. Programs may use the standard errors.Is
+ // function to test the error against kafka error codes.
+ Error error
+}
+
+// OffsetCommit sends an offset commit request to a kafka broker and returns the
+// response.
+func (c *Client) OffsetCommit(ctx context.Context, req *OffsetCommitRequest) (*OffsetCommitResponse, error) {
+ now := time.Now().UnixNano() / int64(time.Millisecond)
+ topics := make([]offsetcommit.RequestTopic, 0, len(req.Topics))
+
+ for topicName, commits := range req.Topics {
+ partitions := make([]offsetcommit.RequestPartition, len(commits))
+
+ for i, c := range commits {
+ partitions[i] = offsetcommit.RequestPartition{
+ PartitionIndex: int32(c.Partition),
+ CommittedOffset: c.Offset,
+ CommittedMetadata: c.Metadata,
+ // This field existed in v1 of the OffsetCommit API; setting it
+ // to the current timestamp is probably a safe thing to do, but
+ // it is hard to tell.
+ CommitTimestamp: now,
+ }
+ }
+
+ topics = append(topics, offsetcommit.RequestTopic{
+ Name: topicName,
+ Partitions: partitions,
+ })
+ }
+
+ m, err := c.roundTrip(ctx, req.Addr, &offsetcommit.Request{
+ GroupID: req.GroupID,
+ GenerationID: int32(req.GenerationID),
+ MemberID: req.MemberID,
+ GroupInstanceID: req.InstanceID,
+ Topics: topics,
+ // Hardcoded retention; this field existed between v2 and v4 of the
+ // OffsetCommit API, and we would have to figure out a way to give the
+ // client control over the API version being used to support configuring
+ // it in the request object.
+ RetentionTimeMs: int64((24 * time.Hour) / time.Millisecond), + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).OffsetCommit: %w", err) + } + r := m.(*offsetcommit.Response) + + res := &OffsetCommitResponse{ + Throttle: makeDuration(r.ThrottleTimeMs), + Topics: make(map[string][]OffsetCommitPartition, len(r.Topics)), + } + + for _, topic := range r.Topics { + partitions := make([]OffsetCommitPartition, len(topic.Partitions)) + + for i, p := range topic.Partitions { + partitions[i] = OffsetCommitPartition{ + Partition: int(p.PartitionIndex), + Error: makeError(p.ErrorCode, ""), + } + } + + res.Topics[topic.Name] = partitions + } + + return res, nil +} + +type offsetCommitRequestV2Partition struct { + // Partition ID + Partition int32 + + // Offset to be committed + Offset int64 + + // Metadata holds any associated metadata the client wants to keep + Metadata string +} + +func (t offsetCommitRequestV2Partition) size() int32 { + return sizeofInt32(t.Partition) + + sizeofInt64(t.Offset) + + sizeofString(t.Metadata) +} + +func (t offsetCommitRequestV2Partition) writeTo(wb *writeBuffer) { + wb.writeInt32(t.Partition) + wb.writeInt64(t.Offset) + wb.writeString(t.Metadata) +} + +type offsetCommitRequestV2Topic struct { + // Topic name + Topic string + + // Partitions to commit offsets + Partitions []offsetCommitRequestV2Partition +} + +func (t offsetCommitRequestV2Topic) size() int32 { + return sizeofString(t.Topic) + + sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) +} + +func (t offsetCommitRequestV2Topic) writeTo(wb *writeBuffer) { + wb.writeString(t.Topic) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) +} + +type offsetCommitRequestV2 struct { + // GroupID holds the unique group identifier + GroupID string + + // GenerationID holds the generation of the group. + GenerationID int32 + + // MemberID assigned by the group coordinator + MemberID string + + // RetentionTime holds the time period in ms to retain the offset. 
+ RetentionTime int64 + + // Topics to commit offsets + Topics []offsetCommitRequestV2Topic +} + +func (t offsetCommitRequestV2) size() int32 { + return sizeofString(t.GroupID) + + sizeofInt32(t.GenerationID) + + sizeofString(t.MemberID) + + sizeofInt64(t.RetentionTime) + + sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() }) +} + +func (t offsetCommitRequestV2) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeInt32(t.GenerationID) + wb.writeString(t.MemberID) + wb.writeInt64(t.RetentionTime) + wb.writeArray(len(t.Topics), func(i int) { t.Topics[i].writeTo(wb) }) +} + +type offsetCommitResponseV2PartitionResponse struct { + Partition int32 + + // ErrorCode holds response error code + ErrorCode int16 +} + +func (t offsetCommitResponseV2PartitionResponse) size() int32 { + return sizeofInt32(t.Partition) + + sizeofInt16(t.ErrorCode) +} + +func (t offsetCommitResponseV2PartitionResponse) writeTo(wb *writeBuffer) { + wb.writeInt32(t.Partition) + wb.writeInt16(t.ErrorCode) +} + +func (t *offsetCommitResponseV2PartitionResponse) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readInt32(r, size, &t.Partition); err != nil { + return + } + if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { + return + } + return +} + +type offsetCommitResponseV2Response struct { + Topic string + PartitionResponses []offsetCommitResponseV2PartitionResponse +} + +func (t offsetCommitResponseV2Response) size() int32 { + return sizeofString(t.Topic) + + sizeofArray(len(t.PartitionResponses), func(i int) int32 { return t.PartitionResponses[i].size() }) +} + +func (t offsetCommitResponseV2Response) writeTo(wb *writeBuffer) { + wb.writeString(t.Topic) + wb.writeArray(len(t.PartitionResponses), func(i int) { t.PartitionResponses[i].writeTo(wb) }) +} + +func (t *offsetCommitResponseV2Response) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readString(r, size, &t.Topic); err != nil { + return + } + + fn := func(r *bufio.Reader, withSize int) (fnRemain int, fnErr error) { + item := offsetCommitResponseV2PartitionResponse{} + if fnRemain, fnErr = (&item).readFrom(r, withSize); fnErr != nil { + return + } + t.PartitionResponses = append(t.PartitionResponses, item) + return + } + if remain, err = readArrayWith(r, remain, fn); err != nil { + return + } + + return +} + +type offsetCommitResponseV2 struct { + Responses []offsetCommitResponseV2Response +} + +func (t offsetCommitResponseV2) size() int32 { + return sizeofArray(len(t.Responses), func(i int) int32 { return t.Responses[i].size() }) +} + +func (t offsetCommitResponseV2) writeTo(wb *writeBuffer) { + wb.writeArray(len(t.Responses), func(i int) { t.Responses[i].writeTo(wb) }) +} + +func (t *offsetCommitResponseV2) readFrom(r *bufio.Reader, size int) (remain int, err error) { + fn := func(r *bufio.Reader, withSize int) (fnRemain int, fnErr error) { + item := offsetCommitResponseV2Response{} + if fnRemain, fnErr = (&item).readFrom(r, withSize); fnErr != nil { + return + } + t.Responses = append(t.Responses, item) + return + } + if remain, err = readArrayWith(r, size, fn); err != nil { + return + } + + return +} diff --git a/vendor/github.com/segmentio/kafka-go/offsetdelete.go b/vendor/github.com/segmentio/kafka-go/offsetdelete.go new file mode 100644 index 00000000000..ea526eb2523 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/offsetdelete.go @@ -0,0 +1,106 @@ +package kafka + +import ( + "context" + "fmt" + "net" + "time" 
+ + "github.com/segmentio/kafka-go/protocol/offsetdelete" +) + +// OffsetDelete deletes the offset for a consumer group on a particular topic +// for a particular partition. +type OffsetDelete struct { + Topic string + Partition int +} + +// OffsetDeleteRequest represents a request sent to a kafka broker to delete +// the offsets for a partition on a given topic associated with a consumer group. +type OffsetDeleteRequest struct { + // Address of the kafka broker to send the request to. + Addr net.Addr + + // ID of the consumer group to delete the offsets for. + GroupID string + + // Set of topic partitions to delete offsets for. + Topics map[string][]int +} + +// OffsetDeleteResponse represents a response from a kafka broker to a delete +// offset request. +type OffsetDeleteResponse struct { + // An error that may have occurred while attempting to delete an offset + Error error + + // The amount of time that the broker throttled the request. + Throttle time.Duration + + // Set of topic partitions that the kafka broker has additional info (error?) + // for. + Topics map[string][]OffsetDeletePartition +} + +// OffsetDeletePartition represents the state of a status of a partition in response +// to deleting offsets. +type OffsetDeletePartition struct { + // ID of the partition. + Partition int + + // An error that may have occurred while attempting to delete an offset for + // this partition. + Error error +} + +// OffsetDelete sends a delete offset request to a kafka broker and returns the +// response. +func (c *Client) OffsetDelete(ctx context.Context, req *OffsetDeleteRequest) (*OffsetDeleteResponse, error) { + topics := make([]offsetdelete.RequestTopic, 0, len(req.Topics)) + + for topicName, partitionIndexes := range req.Topics { + partitions := make([]offsetdelete.RequestPartition, len(partitionIndexes)) + + for i, c := range partitionIndexes { + partitions[i] = offsetdelete.RequestPartition{ + PartitionIndex: int32(c), + } + } + + topics = append(topics, offsetdelete.RequestTopic{ + Name: topicName, + Partitions: partitions, + }) + } + + m, err := c.roundTrip(ctx, req.Addr, &offsetdelete.Request{ + GroupID: req.GroupID, + Topics: topics, + }) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).OffsetDelete: %w", err) + } + r := m.(*offsetdelete.Response) + + res := &OffsetDeleteResponse{ + Error: makeError(r.ErrorCode, ""), + Throttle: makeDuration(r.ThrottleTimeMs), + Topics: make(map[string][]OffsetDeletePartition, len(r.Topics)), + } + + for _, topic := range r.Topics { + partitions := make([]OffsetDeletePartition, len(topic.Partitions)) + + for i, p := range topic.Partitions { + partitions[i] = OffsetDeletePartition{ + Partition: int(p.PartitionIndex), + Error: makeError(p.ErrorCode, ""), + } + } + + res.Topics[topic.Name] = partitions + } + + return res, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/offsetfetch.go b/vendor/github.com/segmentio/kafka-go/offsetfetch.go new file mode 100644 index 00000000000..61fcba2e391 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/offsetfetch.go @@ -0,0 +1,263 @@ +package kafka + +import ( + "bufio" + "context" + "fmt" + "net" + "time" + + "github.com/segmentio/kafka-go/protocol/offsetfetch" +) + +// OffsetFetchRequest represents a request sent to a kafka broker to read the +// currently committed offsets of topic partitions. +type OffsetFetchRequest struct { + // Address of the kafka broker to send the request to. 
diff --git a/vendor/github.com/segmentio/kafka-go/offsetfetch.go b/vendor/github.com/segmentio/kafka-go/offsetfetch.go
new file mode 100644
index 00000000000..61fcba2e391
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/offsetfetch.go
@@ -0,0 +1,263 @@
+package kafka
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol/offsetfetch"
+)
+
+// OffsetFetchRequest represents a request sent to a kafka broker to read the
+// currently committed offsets of topic partitions.
+type OffsetFetchRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // ID of the consumer group to retrieve the offsets for.
+ GroupID string
+
+ // Set of topic partitions to retrieve the offsets for.
+ Topics map[string][]int
+}
+
+// OffsetFetchResponse represents a response from a kafka broker to an offset
+// fetch request.
+type OffsetFetchResponse struct {
+ // The amount of time that the broker throttled the request.
+ Throttle time.Duration
+
+ // Set of topic partitions that the kafka broker has returned offsets for.
+ Topics map[string][]OffsetFetchPartition
+
+ // An error that may have occurred while attempting to retrieve consumer
+ // group offsets.
+ //
+ // The error contains both the kafka error code, and an error message
+ // returned by the kafka broker. Programs may use the standard errors.Is
+ // function to test the error against kafka error codes.
+ Error error
+}
+
+// OffsetFetchPartition represents the state of a single partition in a consumer
+// group.
+type OffsetFetchPartition struct {
+ // ID of the partition.
+ Partition int
+
+ // Last committed offset on the partition when the request was served by
+ // the kafka broker.
+ CommittedOffset int64
+
+ // Consumer group metadata for this partition.
+ Metadata string
+
+ // An error that may have occurred while attempting to retrieve consumer
+ // group offsets for this partition.
+ //
+ // The error contains both the kafka error code, and an error message
+ // returned by the kafka broker. Programs may use the standard errors.Is
+ // function to test the error against kafka error codes.
+ Error error
+}
+
+// OffsetFetch sends an offset fetch request to a kafka broker and returns the
+// response.
+func (c *Client) OffsetFetch(ctx context.Context, req *OffsetFetchRequest) (*OffsetFetchResponse, error) {
+ topics := make([]offsetfetch.RequestTopic, 0, len(req.Topics))
+
+ for topicName, partitions := range req.Topics {
+ indexes := make([]int32, len(partitions))
+
+ for i, p := range partitions {
+ indexes[i] = int32(p)
+ }
+
+ topics = append(topics, offsetfetch.RequestTopic{
+ Name: topicName,
+ PartitionIndexes: indexes,
+ })
+ }
+
+ m, err := c.roundTrip(ctx, req.Addr, &offsetfetch.Request{
+ GroupID: req.GroupID,
+ Topics: topics,
+ })
+
+ if err != nil {
+ return nil, fmt.Errorf("kafka.(*Client).OffsetFetch: %w", err)
+ }
+
+ res := m.(*offsetfetch.Response)
+ ret := &OffsetFetchResponse{
+ Throttle: makeDuration(res.ThrottleTimeMs),
+ Topics: make(map[string][]OffsetFetchPartition, len(res.Topics)),
+ Error: makeError(res.ErrorCode, ""),
+ }
+
+ for _, t := range res.Topics {
+ partitions := make([]OffsetFetchPartition, len(t.Partitions))
+
+ for i, p := range t.Partitions {
+ partitions[i] = OffsetFetchPartition{
+ Partition: int(p.PartitionIndex),
+ CommittedOffset: p.CommittedOffset,
+ Metadata: p.Metadata,
+ Error: makeError(p.ErrorCode, ""),
+ }
+ }
+
+ ret.Topics[t.Name] = partitions
+ }
+
+ return ret, nil
+}
+
+type offsetFetchRequestV1Topic struct {
+ // Topic name
+ Topic string
+
+ // Partitions to fetch offsets
+ Partitions []int32
+}
+
+func (t offsetFetchRequestV1Topic) size() int32 {
+ return sizeofString(t.Topic) +
+ sizeofInt32Array(t.Partitions)
+}
+
+func (t offsetFetchRequestV1Topic) writeTo(wb *writeBuffer) {
+ wb.writeString(t.Topic)
+ wb.writeInt32Array(t.Partitions)
+}
+
+type offsetFetchRequestV1 struct {
+ // GroupID holds the unique group identifier
+ GroupID string
+
+ // Topics to fetch offsets.
+ Topics []offsetFetchRequestV1Topic
+}
+
+func (t offsetFetchRequestV1) size() int32 {
+ return sizeofString(t.GroupID) +
+ sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() })
+}
+
+func (t offsetFetchRequestV1) writeTo(wb *writeBuffer) {
+ wb.writeString(t.GroupID)
+ wb.writeArray(len(t.Topics), func(i int) { t.Topics[i].writeTo(wb) })
+}
+
+type offsetFetchResponseV1PartitionResponse struct {
+ // Partition ID
+ Partition int32
+
+ // Offset of last committed message
+ Offset int64
+
+ // Metadata client wants to keep
+ Metadata string
+
+ // ErrorCode holds response error code
+ ErrorCode int16
+}
+
+func (t offsetFetchResponseV1PartitionResponse) size() int32 {
+ return sizeofInt32(t.Partition) +
+ sizeofInt64(t.Offset) +
+ sizeofString(t.Metadata) +
+ sizeofInt16(t.ErrorCode)
+}
+
+func (t offsetFetchResponseV1PartitionResponse) writeTo(wb *writeBuffer) {
+ wb.writeInt32(t.Partition)
+ wb.writeInt64(t.Offset)
+ wb.writeString(t.Metadata)
+ wb.writeInt16(t.ErrorCode)
+}
+
+func (t *offsetFetchResponseV1PartitionResponse) readFrom(r *bufio.Reader, size int) (remain int, err error) {
+ if remain, err = readInt32(r, size, &t.Partition); err != nil {
+ return
+ }
+ if remain, err = readInt64(r, remain, &t.Offset); err != nil {
+ return
+ }
+ if remain, err = readString(r, remain, &t.Metadata); err != nil {
+ return
+ }
+ if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil {
+ return
+ }
+ return
+}
+
+type offsetFetchResponseV1Response struct {
+ // Topic name
+ Topic string
+
+ // PartitionResponses holds offsets by partition
+ PartitionResponses []offsetFetchResponseV1PartitionResponse
+}
+
+func (t offsetFetchResponseV1Response) size() int32 {
+ return sizeofString(t.Topic) +
+ sizeofArray(len(t.PartitionResponses), func(i int) int32 { return t.PartitionResponses[i].size() })
+}
+
+func (t offsetFetchResponseV1Response) writeTo(wb *writeBuffer) {
+ wb.writeString(t.Topic)
+ wb.writeArray(len(t.PartitionResponses), func(i int) { t.PartitionResponses[i].writeTo(wb) })
+}
+
+func (t *offsetFetchResponseV1Response) readFrom(r *bufio.Reader, size int) (remain int, err error) {
+ if remain, err = readString(r, size, &t.Topic); err != nil {
+ return
+ }
+
+ fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) {
+ item := offsetFetchResponseV1PartitionResponse{}
+ if fnRemain, fnErr = (&item).readFrom(r, size); fnErr != nil {
+ return
+ }
+ t.PartitionResponses = append(t.PartitionResponses, item)
+ return
+ }
+ if remain, err = readArrayWith(r, remain, fn); err != nil {
+ return
+ }
+
+ return
+}
+
+type offsetFetchResponseV1 struct {
+ // Responses holds topic partition offsets
+ Responses []offsetFetchResponseV1Response
+}
+
+func (t offsetFetchResponseV1) size() int32 {
+ return sizeofArray(len(t.Responses), func(i int) int32 { return t.Responses[i].size() })
+}
+
+func (t offsetFetchResponseV1) writeTo(wb *writeBuffer) {
+ wb.writeArray(len(t.Responses), func(i int) { t.Responses[i].writeTo(wb) })
+}
+
+func (t *offsetFetchResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) {
+ fn := func(r *bufio.Reader, withSize int) (fnRemain int, fnErr error) {
+ item := offsetFetchResponseV1Response{}
+ if fnRemain, fnErr = (&item).readFrom(r, withSize); fnErr != nil {
+ return
+ }
+ t.Responses = append(t.Responses, item)
+ return
+ }
+ if remain, err = readArrayWith(r, size, fn); err != nil {
+ return
+ }
+
+ return
+}
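+
+// A minimal usage sketch (editorial illustration, not part of the upstream
+// source; the group and topic names are assumptions):
+//
+//    res, err := client.OffsetFetch(ctx, &kafka.OffsetFetchRequest{
+//        GroupID: "my-group",
+//        Topics:  map[string][]int{"my-topic": {0, 1, 2}},
+//    })
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    for _, p := range res.Topics["my-topic"] {
+//        fmt.Println(p.Partition, p.CommittedOffset)
+//    }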
diff --git a/vendor/github.com/segmentio/kafka-go/produce.go b/vendor/github.com/segmentio/kafka-go/produce.go
new file mode 100644
index 00000000000..72d1ed09b45
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/produce.go
@@ -0,0 +1,323 @@
+package kafka
+
+import (
+ "bufio"
+ "context"
+ "encoding"
+ "errors"
+ "fmt"
+ "net"
+ "strconv"
+ "time"
+
+ "github.com/segmentio/kafka-go/protocol"
+ produceAPI "github.com/segmentio/kafka-go/protocol/produce"
+)
+
+type RequiredAcks int
+
+const (
+ RequireNone RequiredAcks = 0
+ RequireOne RequiredAcks = 1
+ RequireAll RequiredAcks = -1
+)
+
+func (acks RequiredAcks) String() string {
+ switch acks {
+ case RequireNone:
+ return "none"
+ case RequireOne:
+ return "one"
+ case RequireAll:
+ return "all"
+ default:
+ return "unknown"
+ }
+}
+
+func (acks RequiredAcks) MarshalText() ([]byte, error) {
+ return []byte(acks.String()), nil
+}
+
+func (acks *RequiredAcks) UnmarshalText(b []byte) error {
+ switch string(b) {
+ case "none":
+ *acks = RequireNone
+ case "one":
+ *acks = RequireOne
+ case "all":
+ *acks = RequireAll
+ default:
+ x, err := strconv.ParseInt(string(b), 10, 64)
+ parsed := RequiredAcks(x)
+ if err != nil || (parsed != RequireNone && parsed != RequireOne && parsed != RequireAll) {
+ return fmt.Errorf("required acks must be one of none, one, or all, not %q", b)
+ }
+ *acks = parsed
+ }
+ return nil
+}
+
+var (
+ _ encoding.TextMarshaler = RequiredAcks(0)
+ _ encoding.TextUnmarshaler = (*RequiredAcks)(nil)
+)
+
+// ProduceRequest represents a request sent to a kafka broker to produce records
+// to a topic partition.
+type ProduceRequest struct {
+ // Address of the kafka broker to send the request to.
+ Addr net.Addr
+
+ // The topic to produce the records to.
+ Topic string
+
+ // The partition to produce the records to.
+ Partition int
+
+ // The level of required acknowledgements to ask the kafka broker for.
+ RequiredAcks RequiredAcks
+
+ // The message format version used when encoding the records.
+ //
+ // By default, the client automatically determines which version should be
+ // used based on the version of the Produce API supported by the server.
+ MessageVersion int
+
+ // An optional transaction id, used when the produce request is part of
+ // a transaction.
+ TransactionalID string
+
+ // The sequence of records to produce to the topic partition.
+ Records RecordReader
+
+ // An optional compression algorithm to apply to the batch of records sent
+ // to the kafka broker.
+ Compression Compression
+}
+
+// ProduceResponse represents a response from a kafka broker to a produce
+// request.
+type ProduceResponse struct {
+ // The amount of time that the broker throttled the request.
+ Throttle time.Duration
+
+ // An error that may have occurred while attempting to produce the records.
+ //
+ // The error contains both the kafka error code, and an error message
+ // returned by the kafka broker. Programs may use the standard errors.Is
+ // function to test the error against kafka error codes.
+ Error error
+
+ // Offset of the first record that was written to the topic partition.
+ //
+ // This field will be zero if the kafka broker did not support Produce API
+ // version 3 or above.
+ BaseOffset int64
+
+ // Time at which the broker wrote the records to the topic partition.
+ //
+ // This field will be zero if the kafka broker did not support Produce API
+ // version 2 or above.
+ LogAppendTime time.Time
+
+ // First offset in the topic partition that the records were written to.
+ // + // This field will be zero if the kafka broker did not support Produce + // API version 5 or above (or if the first offset is zero). + LogStartOffset int64 + + // If errors occurred writing specific records, they will be reported in + // this map. + // + // This field will always be empty if the kafka broker did not support the + // Produce API in version 8 or above. + RecordErrors map[int]error +} + +// Produce sends a produce request to a kafka broker and returns the response. +// +// If the request contained no records, an error wrapping protocol.ErrNoRecord +// is returned. +// +// When the request is configured with RequiredAcks=none, both the response and +// the error will be nil on success. +func (c *Client) Produce(ctx context.Context, req *ProduceRequest) (*ProduceResponse, error) { + attributes := protocol.Attributes(req.Compression) & 0x7 + + m, err := c.roundTrip(ctx, req.Addr, &produceAPI.Request{ + TransactionalID: req.TransactionalID, + Acks: int16(req.RequiredAcks), + Timeout: c.timeoutMs(ctx, defaultProduceTimeout), + Topics: []produceAPI.RequestTopic{{ + Topic: req.Topic, + Partitions: []produceAPI.RequestPartition{{ + Partition: int32(req.Partition), + RecordSet: protocol.RecordSet{ + Attributes: attributes, + Records: req.Records, + }, + }}, + }}, + }) + + switch { + case err == nil: + case errors.Is(err, protocol.ErrNoRecord): + return new(ProduceResponse), nil + default: + return nil, fmt.Errorf("kafka.(*Client).Produce: %w", err) + } + + if req.RequiredAcks == RequireNone { + return nil, nil + } + + res := m.(*produceAPI.Response) + if len(res.Topics) == 0 { + return nil, fmt.Errorf("kafka.(*Client).Produce: %w", protocol.ErrNoTopic) + } + topic := &res.Topics[0] + if len(topic.Partitions) == 0 { + return nil, fmt.Errorf("kafka.(*Client).Produce: %w", protocol.ErrNoPartition) + } + partition := &topic.Partitions[0] + + ret := &ProduceResponse{ + Throttle: makeDuration(res.ThrottleTimeMs), + Error: makeError(partition.ErrorCode, partition.ErrorMessage), + BaseOffset: partition.BaseOffset, + LogAppendTime: makeTime(partition.LogAppendTime), + LogStartOffset: partition.LogStartOffset, + } + + if len(partition.RecordErrors) != 0 { + ret.RecordErrors = make(map[int]error, len(partition.RecordErrors)) + + for _, recErr := range partition.RecordErrors { + ret.RecordErrors[int(recErr.BatchIndex)] = errors.New(recErr.BatchIndexErrorMessage) + } + } + + return ret, nil +} + +type produceRequestV2 struct { + RequiredAcks int16 + Timeout int32 + Topics []produceRequestTopicV2 +} + +func (r produceRequestV2) size() int32 { + return 2 + 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) +} + +func (r produceRequestV2) writeTo(wb *writeBuffer) { + wb.writeInt16(r.RequiredAcks) + wb.writeInt32(r.Timeout) + wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) }) +} + +type produceRequestTopicV2 struct { + TopicName string + Partitions []produceRequestPartitionV2 +} + +func (t produceRequestTopicV2) size() int32 { + return sizeofString(t.TopicName) + + sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) +} + +func (t produceRequestTopicV2) writeTo(wb *writeBuffer) { + wb.writeString(t.TopicName) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) +} + +type produceRequestPartitionV2 struct { + Partition int32 + MessageSetSize int32 + MessageSet messageSet +} + +func (p produceRequestPartitionV2) size() int32 { + return 4 + 4 + p.MessageSet.size() +} + +func (p 
produceRequestPartitionV2) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt32(p.MessageSetSize) + p.MessageSet.writeTo(wb) +} + +type produceResponsePartitionV2 struct { + Partition int32 + ErrorCode int16 + Offset int64 + Timestamp int64 +} + +func (p produceResponsePartitionV2) size() int32 { + return 4 + 2 + 8 + 8 +} + +func (p produceResponsePartitionV2) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt16(p.ErrorCode) + wb.writeInt64(p.Offset) + wb.writeInt64(p.Timestamp) +} + +func (p *produceResponsePartitionV2) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + if remain, err = readInt32(r, sz, &p.Partition); err != nil { + return + } + if remain, err = readInt16(r, remain, &p.ErrorCode); err != nil { + return + } + if remain, err = readInt64(r, remain, &p.Offset); err != nil { + return + } + if remain, err = readInt64(r, remain, &p.Timestamp); err != nil { + return + } + return +} + +type produceResponsePartitionV7 struct { + Partition int32 + ErrorCode int16 + Offset int64 + Timestamp int64 + StartOffset int64 +} + +func (p produceResponsePartitionV7) size() int32 { + return 4 + 2 + 8 + 8 + 8 +} + +func (p produceResponsePartitionV7) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt16(p.ErrorCode) + wb.writeInt64(p.Offset) + wb.writeInt64(p.Timestamp) + wb.writeInt64(p.StartOffset) +} + +func (p *produceResponsePartitionV7) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + if remain, err = readInt32(r, sz, &p.Partition); err != nil { + return + } + if remain, err = readInt16(r, remain, &p.ErrorCode); err != nil { + return + } + if remain, err = readInt64(r, remain, &p.Offset); err != nil { + return + } + if remain, err = readInt64(r, remain, &p.Timestamp); err != nil { + return + } + if remain, err = readInt64(r, remain, &p.StartOffset); err != nil { + return + } + return +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol.go b/vendor/github.com/segmentio/kafka-go/protocol.go new file mode 100644 index 00000000000..37208abf137 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol.go @@ -0,0 +1,214 @@ +package kafka + +import ( + "encoding/binary" + "fmt" + "strconv" +) + +type ApiVersion struct { + ApiKey int16 + MinVersion int16 + MaxVersion int16 +} + +func (v ApiVersion) Format(w fmt.State, r rune) { + switch r { + case 's': + fmt.Fprint(w, apiKey(v.ApiKey)) + case 'd': + switch { + case w.Flag('-'): + fmt.Fprint(w, v.MinVersion) + case w.Flag('+'): + fmt.Fprint(w, v.MaxVersion) + default: + fmt.Fprint(w, v.ApiKey) + } + case 'v': + switch { + case w.Flag('-'): + fmt.Fprintf(w, "v%d", v.MinVersion) + case w.Flag('+'): + fmt.Fprintf(w, "v%d", v.MaxVersion) + case w.Flag('#'): + fmt.Fprintf(w, "kafka.ApiVersion{ApiKey:%d MinVersion:%d MaxVersion:%d}", v.ApiKey, v.MinVersion, v.MaxVersion) + default: + fmt.Fprintf(w, "%s[v%d:v%d]", apiKey(v.ApiKey), v.MinVersion, v.MaxVersion) + } + } +} + +type apiKey int16 + +const ( + produce apiKey = 0 + fetch apiKey = 1 + listOffsets apiKey = 2 + metadata apiKey = 3 + leaderAndIsr apiKey = 4 + stopReplica apiKey = 5 + updateMetadata apiKey = 6 + controlledShutdown apiKey = 7 + offsetCommit apiKey = 8 + offsetFetch apiKey = 9 + findCoordinator apiKey = 10 + joinGroup apiKey = 11 + heartbeat apiKey = 12 + leaveGroup apiKey = 13 + syncGroup apiKey = 14 + describeGroups apiKey = 15 + listGroups apiKey = 16 + saslHandshake apiKey = 17 + apiVersions apiKey = 18 + createTopics apiKey = 19 + deleteTopics apiKey = 20 + 
deleteRecords apiKey = 21
+ initProducerId apiKey = 22
+ offsetForLeaderEpoch apiKey = 23
+ addPartitionsToTxn apiKey = 24
+ addOffsetsToTxn apiKey = 25
+ endTxn apiKey = 26
+ writeTxnMarkers apiKey = 27
+ txnOffsetCommit apiKey = 28
+ describeAcls apiKey = 29
+ createAcls apiKey = 30
+ deleteAcls apiKey = 31
+ describeConfigs apiKey = 32
+ alterConfigs apiKey = 33
+ alterReplicaLogDirs apiKey = 34
+ describeLogDirs apiKey = 35
+ saslAuthenticate apiKey = 36
+ createPartitions apiKey = 37
+ createDelegationToken apiKey = 38
+ renewDelegationToken apiKey = 39
+ expireDelegationToken apiKey = 40
+ describeDelegationToken apiKey = 41
+ deleteGroups apiKey = 42
+ electLeaders apiKey = 43
+ incrementalAlterConfigs apiKey = 44
+ alterPartitionReassignments apiKey = 45
+ listPartitionReassignments apiKey = 46
+ offsetDelete apiKey = 47
+)
+
+func (k apiKey) String() string {
+ if i := int(k); i >= 0 && i < len(apiKeyStrings) {
+ return apiKeyStrings[i]
+ }
+ return strconv.Itoa(int(k))
+}
+
+type apiVersion int16
+
+const (
+ v0 = 0
+ v1 = 1
+ v2 = 2
+ v3 = 3
+ v5 = 5
+ v6 = 6
+ v7 = 7
+ v10 = 10
+
+ // Unused protocol versions: v4, v8, v9.
+)
+
+var apiKeyStrings = [...]string{
+ produce: "Produce",
+ fetch: "Fetch",
+ listOffsets: "ListOffsets",
+ metadata: "Metadata",
+ leaderAndIsr: "LeaderAndIsr",
+ stopReplica: "StopReplica",
+ updateMetadata: "UpdateMetadata",
+ controlledShutdown: "ControlledShutdown",
+ offsetCommit: "OffsetCommit",
+ offsetFetch: "OffsetFetch",
+ findCoordinator: "FindCoordinator",
+ joinGroup: "JoinGroup",
+ heartbeat: "Heartbeat",
+ leaveGroup: "LeaveGroup",
+ syncGroup: "SyncGroup",
+ describeGroups: "DescribeGroups",
+ listGroups: "ListGroups",
+ saslHandshake: "SaslHandshake",
+ apiVersions: "ApiVersions",
+ createTopics: "CreateTopics",
+ deleteTopics: "DeleteTopics",
+ deleteRecords: "DeleteRecords",
+ initProducerId: "InitProducerId",
+ offsetForLeaderEpoch: "OffsetForLeaderEpoch",
+ addPartitionsToTxn: "AddPartitionsToTxn",
+ addOffsetsToTxn: "AddOffsetsToTxn",
+ endTxn: "EndTxn",
+ writeTxnMarkers: "WriteTxnMarkers",
+ txnOffsetCommit: "TxnOffsetCommit",
+ describeAcls: "DescribeAcls",
+ createAcls: "CreateAcls",
+ deleteAcls: "DeleteAcls",
+ describeConfigs: "DescribeConfigs",
+ alterConfigs: "AlterConfigs",
+ alterReplicaLogDirs: "AlterReplicaLogDirs",
+ describeLogDirs: "DescribeLogDirs",
+ saslAuthenticate: "SaslAuthenticate",
+ createPartitions: "CreatePartitions",
+ createDelegationToken: "CreateDelegationToken",
+ renewDelegationToken: "RenewDelegationToken",
+ expireDelegationToken: "ExpireDelegationToken",
+ describeDelegationToken: "DescribeDelegationToken",
+ deleteGroups: "DeleteGroups",
+ electLeaders: "ElectLeaders",
+ incrementalAlterConfigs: "IncrementalAlterConfigs",
+ alterPartitionReassignments: "AlterPartitionReassignments",
+ listPartitionReassignments: "ListPartitionReassignments",
+ offsetDelete: "OffsetDelete",
+}
+
+type requestHeader struct {
+ Size int32
+ ApiKey int16
+ ApiVersion int16
+ CorrelationID int32
+ ClientID string
+}
+
+func (h requestHeader) size() int32 {
+ return 4 + 2 + 2 + 4 + sizeofString(h.ClientID)
+}
+
+func (h requestHeader) writeTo(wb *writeBuffer) {
+ wb.writeInt32(h.Size)
+ wb.writeInt16(h.ApiKey)
+ wb.writeInt16(h.ApiVersion)
+ wb.writeInt32(h.CorrelationID)
+ wb.writeString(h.ClientID)
+}
+
+type request interface {
+ size() int32
+ writable
+}
+
+func makeInt8(b []byte) int8 {
+ return int8(b[0])
+}
+
+func makeInt16(b []byte) int16 {
+ return int16(binary.BigEndian.Uint16(b))
+}
+
+func makeInt32(b
[]byte) int32 { + return int32(binary.BigEndian.Uint32(b)) +} + +func makeInt64(b []byte) int64 { + return int64(binary.BigEndian.Uint64(b)) +} + +func expectZeroSize(sz int, err error) error { + if err == nil && sz != 0 { + err = fmt.Errorf("reading a response left %d unread bytes", sz) + } + return err +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/addoffsetstotxn/addoffsetstotxn.go b/vendor/github.com/segmentio/kafka-go/protocol/addoffsetstotxn/addoffsetstotxn.go new file mode 100644 index 00000000000..390e0db43e1 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/addoffsetstotxn/addoffsetstotxn.go @@ -0,0 +1,35 @@ +package addoffsetstotxn + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + TransactionalID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + ProducerID int64 `kafka:"min=v0,max=v3"` + ProducerEpoch int16 `kafka:"min=v0,max=v3"` + GroupID string `kafka:"min=v0,max=v3|min=v3,max=v3,compact"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.AddOffsetsToTxn } + +func (r *Request) Transaction() string { return r.TransactionalID } + +var _ protocol.TransactionalMessage = (*Request)(nil) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v3"` + ErrorCode int16 `kafka:"min=v0,max=v3"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.AddOffsetsToTxn } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/addpartitionstotxn/addpartitionstotxn.go b/vendor/github.com/segmentio/kafka-go/protocol/addpartitionstotxn/addpartitionstotxn.go new file mode 100644 index 00000000000..b204da35006 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/addpartitionstotxn/addpartitionstotxn.go @@ -0,0 +1,62 @@ +package addpartitionstotxn + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + TransactionalID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + ProducerID int64 `kafka:"min=v0,max=v3"` + ProducerEpoch int16 `kafka:"min=v0,max=v3"` + Topics []RequestTopic `kafka:"min=v0,max=v3"` +} + +type RequestTopic struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + Name string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + Partitions []int32 `kafka:"min=v0,max=v3"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.AddPartitionsToTxn } + +func (r *Request) Transaction() string { return r.TransactionalID } + +var _ protocol.TransactionalMessage = (*Request)(nil) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
+ _ struct{} `kafka:"min=v3,max=v3,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v3"` + Results []ResponseResult `kafka:"min=v0,max=v3"` +} + +type ResponseResult struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + Name string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + Results []ResponsePartition `kafka:"min=v0,max=v3"` +} + +type ResponsePartition struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + PartitionIndex int32 `kafka:"min=v0,max=v3"` + ErrorCode int16 `kafka:"min=v0,max=v3"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.AddPartitionsToTxn } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/alterclientquotas/alterclientquotas.go b/vendor/github.com/segmentio/kafka-go/protocol/alterclientquotas/alterclientquotas.go new file mode 100644 index 00000000000..c657d92ac33 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/alterclientquotas/alterclientquotas.go @@ -0,0 +1,68 @@ +package alterclientquotas + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_AlterClientQuotas +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + Entries []Entry `kafka:"min=v0,max=v1"` + ValidateOnly bool `kafka:"min=v0,max=v1"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.AlterClientQuotas } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type Entry struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + Entities []Entity `kafka:"min=v0,max=v1"` + Ops []Ops `kafka:"min=v0,max=v1"` +} + +type Entity struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + EntityType string `kafka:"min=v0,max=v0|min=v1,max=v1,compact"` + EntityName string `kafka:"min=v0,max=v0,nullable|min=v1,max=v1,nullable,compact"` +} + +type Ops struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + Key string `kafka:"min=v0,max=v0|min=v1,max=v1,compact"` + Value float64 `kafka:"min=v0,max=v1"` + Remove bool `kafka:"min=v0,max=v1"` +} + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + ThrottleTimeMs int32 `kafka:"min=v0,max=v1"` + Results []ResponseQuotas `kafka:"min=v0,max=v1"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.AlterClientQuotas } + +type ResponseQuotas struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
+ _ struct{} `kafka:"min=v1,max=v1,tag"` + ErrorCode int16 `kafka:"min=v0,max=v1"` + ErrorMessage string `kafka:"min=v0,max=v1,nullable"` + Entities []Entity `kafka:"min=v0,max=v1"` +} + +var _ protocol.BrokerMessage = (*Request)(nil) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/alterconfigs/alterconfigs.go b/vendor/github.com/segmentio/kafka-go/protocol/alterconfigs/alterconfigs.go new file mode 100644 index 00000000000..6c7d0d5dbe7 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/alterconfigs/alterconfigs.go @@ -0,0 +1,48 @@ +package alterconfigs + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_AlterConfigs +type Request struct { + Resources []RequestResources `kafka:"min=v0,max=v1"` + ValidateOnly bool `kafka:"min=v0,max=v1"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.AlterConfigs } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type RequestResources struct { + ResourceType int8 `kafka:"min=v0,max=v1"` + ResourceName string `kafka:"min=v0,max=v1"` + Configs []RequestConfig `kafka:"min=v0,max=v1"` +} + +type RequestConfig struct { + Name string `kafka:"min=v0,max=v1"` + Value string `kafka:"min=v0,max=v1,nullable"` +} + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v0,max=v1"` + Responses []ResponseResponses `kafka:"min=v0,max=v1"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.AlterConfigs } + +type ResponseResponses struct { + ErrorCode int16 `kafka:"min=v0,max=v1"` + ErrorMessage string `kafka:"min=v0,max=v1,nullable"` + ResourceType int8 `kafka:"min=v0,max=v1"` + ResourceName string `kafka:"min=v0,max=v1"` +} + +var ( + _ protocol.BrokerMessage = (*Request)(nil) +) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/alterpartitionreassignments/alterpartitionreassignments.go b/vendor/github.com/segmentio/kafka-go/protocol/alterpartitionreassignments/alterpartitionreassignments.go new file mode 100644 index 00000000000..4894a2e6a78 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/alterpartitionreassignments/alterpartitionreassignments.go @@ -0,0 +1,61 @@ +package alterpartitionreassignments + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_AlterPartitionReassignments +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v0,max=v0,tag"` + + TimeoutMs int32 `kafka:"min=v0,max=v0"` + Topics []RequestTopic `kafka:"min=v0,max=v0"` +} + +type RequestTopic struct { + Name string `kafka:"min=v0,max=v0"` + Partitions []RequestPartition `kafka:"min=v0,max=v0"` +} + +type RequestPartition struct { + PartitionIndex int32 `kafka:"min=v0,max=v0"` + Replicas []int32 `kafka:"min=v0,max=v0"` +} + +func (r *Request) ApiKey() protocol.ApiKey { + return protocol.AlterPartitionReassignments +} + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
+ _ struct{} `kafka:"min=v0,max=v0,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v0"` + ErrorCode int16 `kafka:"min=v0,max=v0"` + ErrorMessage string `kafka:"min=v0,max=v0,nullable"` + Results []ResponseResult `kafka:"min=v0,max=v0"` +} + +type ResponseResult struct { + Name string `kafka:"min=v0,max=v0"` + Partitions []ResponsePartition `kafka:"min=v0,max=v0"` +} + +type ResponsePartition struct { + PartitionIndex int32 `kafka:"min=v0,max=v0"` + ErrorCode int16 `kafka:"min=v0,max=v0"` + ErrorMessage string `kafka:"min=v0,max=v0,nullable"` +} + +func (r *Response) ApiKey() protocol.ApiKey { + return protocol.AlterPartitionReassignments +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/apiversions/apiversions.go b/vendor/github.com/segmentio/kafka-go/protocol/apiversions/apiversions.go new file mode 100644 index 00000000000..1c574558263 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/apiversions/apiversions.go @@ -0,0 +1,27 @@ +package apiversions + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + _ struct{} `kafka:"min=v0,max=v2"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.ApiVersions } + +type Response struct { + ErrorCode int16 `kafka:"min=v0,max=v2"` + ApiKeys []ApiKeyResponse `kafka:"min=v0,max=v2"` + ThrottleTimeMs int32 `kafka:"min=v1,max=v2"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.ApiVersions } + +type ApiKeyResponse struct { + ApiKey int16 `kafka:"min=v0,max=v2"` + MinVersion int16 `kafka:"min=v0,max=v2"` + MaxVersion int16 `kafka:"min=v0,max=v2"` +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/buffer.go b/vendor/github.com/segmentio/kafka-go/protocol/buffer.go new file mode 100644 index 00000000000..d45a91dbddc --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/buffer.go @@ -0,0 +1,634 @@ +package protocol + +import ( + "bytes" + "errors" + "fmt" + "io" + "math" + "sync" + "sync/atomic" +) + +// Bytes is an interface implemented by types that represent immutable +// sequences of bytes. +// +// Bytes values are used to abstract the location where record keys and +// values are read from (e.g. in-memory buffers, network sockets, files). +// +// The Close method should be called to release resources held by the object +// when the program is done with it. +// +// Bytes values are generally not safe to use concurrently from multiple +// goroutines. +type Bytes interface { + io.ReadCloser + // Returns the number of bytes remaining to be read from the payload. + Len() int +} + +// NewBytes constructs a Bytes value from b. +// +// The returned value references b, it does not make a copy of the backing +// array. +// +// If b is nil, nil is returned to represent a null BYTES value in the kafka +// protocol. +func NewBytes(b []byte) Bytes { + if b == nil { + return nil + } + r := new(bytesReader) + r.Reset(b) + return r +} + +// ReadAll is similar to ioutil.ReadAll, but it takes advantage of knowing the +// length of b to minimize the memory footprint. +// +// The function returns a nil slice if b is nil. 
+func ReadAll(b Bytes) ([]byte, error) {
+ if b == nil {
+ return nil, nil
+ }
+ s := make([]byte, b.Len())
+ _, err := io.ReadFull(b, s)
+ return s, err
+}
+
+type bytesReader struct{ bytes.Reader }
+
+func (*bytesReader) Close() error { return nil }
+
+type refCount uintptr
+
+func (rc *refCount) ref() { atomic.AddUintptr((*uintptr)(rc), 1) }
+
+func (rc *refCount) unref(onZero func()) {
+ if atomic.AddUintptr((*uintptr)(rc), ^uintptr(0)) == 0 {
+ onZero()
+ }
+}
+
+const (
+ // Size of the memory buffer for a single page. We use a fairly
+ // large size here (64 KiB) because batches exchanged with kafka
+ // tend to be multiple kilobytes in size, sometimes hundreds.
+ // Using large pages amortizes the overhead of the page metadata
+ // and algorithms to manage the pages.
+ pageSize = 65536
+)
+
+type page struct {
+ refc refCount
+ offset int64
+ length int
+ buffer *[pageSize]byte
+}
+
+func newPage(offset int64) *page {
+ p, _ := pagePool.Get().(*page)
+ if p != nil {
+ p.offset = offset
+ p.length = 0
+ p.ref()
+ } else {
+ p = &page{
+ refc: 1,
+ offset: offset,
+ buffer: &[pageSize]byte{},
+ }
+ }
+ return p
+}
+
+func (p *page) ref() { p.refc.ref() }
+
+func (p *page) unref() { p.refc.unref(func() { pagePool.Put(p) }) }
+
+func (p *page) slice(begin, end int64) []byte {
+ i, j := begin-p.offset, end-p.offset
+
+ if i < 0 {
+ i = 0
+ } else if i > pageSize {
+ i = pageSize
+ }
+
+ if j < 0 {
+ j = 0
+ } else if j > pageSize {
+ j = pageSize
+ }
+
+ if i < j {
+ return p.buffer[i:j]
+ }
+
+ return nil
+}
+
+func (p *page) Cap() int { return pageSize }
+
+func (p *page) Len() int { return p.length }
+
+func (p *page) Size() int64 { return int64(p.length) }
+
+func (p *page) Truncate(n int) {
+ if n < p.length {
+ p.length = n
+ }
+}
+
+func (p *page) ReadAt(b []byte, off int64) (int, error) {
+ if off -= p.offset; off < 0 || off > pageSize {
+ panic("offset out of range")
+ }
+ if off > int64(p.length) {
+ return 0, nil
+ }
+ return copy(b, p.buffer[off:p.length]), nil
+}
+
+func (p *page) ReadFrom(r io.Reader) (int64, error) {
+ n, err := io.ReadFull(r, p.buffer[p.length:])
+ if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
+ err = nil
+ }
+ p.length += n
+ return int64(n), err
+}
+
+func (p *page) WriteAt(b []byte, off int64) (int, error) {
+ if off -= p.offset; off < 0 || off > pageSize {
+ panic("offset out of range")
+ }
+ n := copy(p.buffer[off:], b)
+ if end := int(off) + n; end > p.length {
+ p.length = end
+ }
+ return n, nil
+}
+
+func (p *page) Write(b []byte) (int, error) {
+ return p.WriteAt(b, p.offset+int64(p.length))
+}
+
+var (
+ _ io.ReaderAt = (*page)(nil)
+ _ io.ReaderFrom = (*page)(nil)
+ _ io.Writer = (*page)(nil)
+ _ io.WriterAt = (*page)(nil)
+)
+
+type pageBuffer struct {
+ refc refCount
+ pages contiguousPages
+ length int
+ cursor int
+}
+
+func newPageBuffer() *pageBuffer {
+ b, _ := pageBufferPool.Get().(*pageBuffer)
+ if b != nil {
+ b.cursor = 0
+ b.refc.ref()
+ } else {
+ b = &pageBuffer{
+ refc: 1,
+ pages: make(contiguousPages, 0, 16),
+ }
+ }
+ return b
+}
+
+func (pb *pageBuffer) refTo(ref *pageRef, begin, end int64) {
+ length := end - begin
+
+ if length > math.MaxUint32 {
+ panic("reference to contiguous buffer pages exceeds the maximum size of 4 GB")
+ }
+
+ ref.pages = append(ref.buffer[:0], pb.pages.slice(begin, end)...)
+ ref.pages.ref() + ref.offset = begin + ref.length = uint32(length) +} + +func (pb *pageBuffer) ref(begin, end int64) *pageRef { + ref := new(pageRef) + pb.refTo(ref, begin, end) + return ref +} + +func (pb *pageBuffer) unref() { + pb.refc.unref(func() { + pb.pages.unref() + pb.pages.clear() + pb.pages = pb.pages[:0] + pb.length = 0 + pageBufferPool.Put(pb) + }) +} + +func (pb *pageBuffer) newPage() *page { + return newPage(int64(pb.length)) +} + +func (pb *pageBuffer) Close() error { + return nil +} + +func (pb *pageBuffer) Len() int { + return pb.length - pb.cursor +} + +func (pb *pageBuffer) Size() int64 { + return int64(pb.length) +} + +func (pb *pageBuffer) Discard(n int) (int, error) { + remain := pb.length - pb.cursor + if remain < n { + n = remain + } + pb.cursor += n + return n, nil +} + +func (pb *pageBuffer) Truncate(n int) { + if n < pb.length { + pb.length = n + + if n < pb.cursor { + pb.cursor = n + } + + for i := range pb.pages { + if p := pb.pages[i]; p.length <= n { + n -= p.length + } else { + if n > 0 { + pb.pages[i].Truncate(n) + i++ + } + pb.pages[i:].unref() + pb.pages[i:].clear() + pb.pages = pb.pages[:i] + break + } + } + } +} + +func (pb *pageBuffer) Seek(offset int64, whence int) (int64, error) { + c, err := seek(int64(pb.cursor), int64(pb.length), offset, whence) + if err != nil { + return -1, err + } + pb.cursor = int(c) + return c, nil +} + +func (pb *pageBuffer) ReadByte() (byte, error) { + b := [1]byte{} + _, err := pb.Read(b[:]) + return b[0], err +} + +func (pb *pageBuffer) Read(b []byte) (int, error) { + if pb.cursor >= pb.length { + return 0, io.EOF + } + n, err := pb.ReadAt(b, int64(pb.cursor)) + pb.cursor += n + return n, err +} + +func (pb *pageBuffer) ReadAt(b []byte, off int64) (int, error) { + return pb.pages.ReadAt(b, off) +} + +func (pb *pageBuffer) ReadFrom(r io.Reader) (int64, error) { + if len(pb.pages) == 0 { + pb.pages = append(pb.pages, pb.newPage()) + } + + rn := int64(0) + + for { + tail := pb.pages[len(pb.pages)-1] + free := tail.Cap() - tail.Len() + + if free == 0 { + tail = pb.newPage() + free = pageSize + pb.pages = append(pb.pages, tail) + } + + n, err := tail.ReadFrom(r) + pb.length += int(n) + rn += n + if n < int64(free) { + return rn, err + } + } +} + +func (pb *pageBuffer) WriteString(s string) (int, error) { + return pb.Write([]byte(s)) +} + +func (pb *pageBuffer) Write(b []byte) (int, error) { + wn := len(b) + if wn == 0 { + return 0, nil + } + + if len(pb.pages) == 0 { + pb.pages = append(pb.pages, pb.newPage()) + } + + for len(b) != 0 { + tail := pb.pages[len(pb.pages)-1] + free := tail.Cap() - tail.Len() + + if len(b) <= free { + tail.Write(b) + pb.length += len(b) + break + } + + tail.Write(b[:free]) + b = b[free:] + + pb.length += free + pb.pages = append(pb.pages, pb.newPage()) + } + + return wn, nil +} + +func (pb *pageBuffer) WriteAt(b []byte, off int64) (int, error) { + n, err := pb.pages.WriteAt(b, off) + if err != nil { + return n, err + } + if n < len(b) { + pb.Write(b[n:]) + } + return len(b), nil +} + +func (pb *pageBuffer) WriteTo(w io.Writer) (int64, error) { + var wn int + var err error + pb.pages.scan(int64(pb.cursor), int64(pb.length), func(b []byte) bool { + var n int + n, err = w.Write(b) + wn += n + return err == nil + }) + pb.cursor += wn + return int64(wn), err +} + +var ( + _ io.ReaderAt = (*pageBuffer)(nil) + _ io.ReaderFrom = (*pageBuffer)(nil) + _ io.StringWriter = (*pageBuffer)(nil) + _ io.Writer = (*pageBuffer)(nil) + _ io.WriterAt = (*pageBuffer)(nil) + _ io.WriterTo = (*pageBuffer)(nil) + + 
pagePool sync.Pool + pageBufferPool sync.Pool +) + +type contiguousPages []*page + +func (pages contiguousPages) ref() { + for _, p := range pages { + p.ref() + } +} + +func (pages contiguousPages) unref() { + for _, p := range pages { + p.unref() + } +} + +func (pages contiguousPages) clear() { + for i := range pages { + pages[i] = nil + } +} + +func (pages contiguousPages) ReadAt(b []byte, off int64) (int, error) { + rn := 0 + + for _, p := range pages.slice(off, off+int64(len(b))) { + n, _ := p.ReadAt(b, off) + b = b[n:] + rn += n + off += int64(n) + } + + return rn, nil +} + +func (pages contiguousPages) WriteAt(b []byte, off int64) (int, error) { + wn := 0 + + for _, p := range pages.slice(off, off+int64(len(b))) { + n, _ := p.WriteAt(b, off) + b = b[n:] + wn += n + off += int64(n) + } + + return wn, nil +} + +func (pages contiguousPages) slice(begin, end int64) contiguousPages { + i := pages.indexOf(begin) + j := pages.indexOf(end) + if j < len(pages) { + j++ + } + return pages[i:j] +} + +func (pages contiguousPages) indexOf(offset int64) int { + if len(pages) == 0 { + return 0 + } + return int((offset - pages[0].offset) / pageSize) +} + +func (pages contiguousPages) scan(begin, end int64, f func([]byte) bool) { + for _, p := range pages.slice(begin, end) { + if !f(p.slice(begin, end)) { + break + } + } +} + +var ( + _ io.ReaderAt = contiguousPages{} + _ io.WriterAt = contiguousPages{} +) + +type pageRef struct { + buffer [2]*page + pages contiguousPages + offset int64 + cursor int64 + length uint32 + once uint32 +} + +func (ref *pageRef) unref() { + if atomic.CompareAndSwapUint32(&ref.once, 0, 1) { + ref.pages.unref() + ref.pages.clear() + ref.pages = nil + ref.offset = 0 + ref.cursor = 0 + ref.length = 0 + } +} + +func (ref *pageRef) Len() int { return int(ref.Size() - ref.cursor) } + +func (ref *pageRef) Size() int64 { return int64(ref.length) } + +func (ref *pageRef) Close() error { ref.unref(); return nil } + +func (ref *pageRef) String() string { + return fmt.Sprintf("[offset=%d cursor=%d length=%d]", ref.offset, ref.cursor, ref.length) +} + +func (ref *pageRef) Seek(offset int64, whence int) (int64, error) { + c, err := seek(ref.cursor, int64(ref.length), offset, whence) + if err != nil { + return -1, err + } + ref.cursor = c + return c, nil +} + +func (ref *pageRef) ReadByte() (byte, error) { + var c byte + var ok bool + ref.scan(ref.cursor, func(b []byte) bool { + c, ok = b[0], true + return false + }) + if ok { + ref.cursor++ + } else { + return 0, io.EOF + } + return c, nil +} + +func (ref *pageRef) Read(b []byte) (int, error) { + if ref.cursor >= int64(ref.length) { + return 0, io.EOF + } + n, err := ref.ReadAt(b, ref.cursor) + ref.cursor += int64(n) + return n, err +} + +func (ref *pageRef) ReadAt(b []byte, off int64) (int, error) { + limit := ref.offset + int64(ref.length) + off += ref.offset + + if off >= limit { + return 0, io.EOF + } + + if off+int64(len(b)) > limit { + b = b[:limit-off] + } + + if len(b) == 0 { + return 0, nil + } + + n, err := ref.pages.ReadAt(b, off) + if n == 0 && err == nil { + err = io.EOF + } + return n, err +} + +func (ref *pageRef) WriteTo(w io.Writer) (wn int64, err error) { + ref.scan(ref.cursor, func(b []byte) bool { + var n int + n, err = w.Write(b) + wn += int64(n) + return err == nil + }) + ref.cursor += wn + return +} + +func (ref *pageRef) scan(off int64, f func([]byte) bool) { + begin := ref.offset + off + end := ref.offset + int64(ref.length) + ref.pages.scan(begin, end, f) +} + +var ( + _ io.Closer = (*pageRef)(nil) + _ io.Seeker 
= (*pageRef)(nil) + _ io.Reader = (*pageRef)(nil) + _ io.ReaderAt = (*pageRef)(nil) + _ io.WriterTo = (*pageRef)(nil) +) + +type pageRefAllocator struct { + refs []pageRef + head int + size int +} + +func (a *pageRefAllocator) newPageRef() *pageRef { + if a.head == len(a.refs) { + a.refs = make([]pageRef, a.size) + a.head = 0 + } + ref := &a.refs[a.head] + a.head++ + return ref +} + +func seek(cursor, limit, offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + // absolute offset + case io.SeekCurrent: + offset = cursor + offset + case io.SeekEnd: + offset = limit - offset + default: + return -1, fmt.Errorf("seek: invalid whence value: %d", whence) + } + if offset < 0 { + offset = 0 + } + if offset > limit { + offset = limit + } + return offset, nil +} + +func closeBytes(b Bytes) { + if b != nil { + b.Close() + } +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/cluster.go b/vendor/github.com/segmentio/kafka-go/protocol/cluster.go new file mode 100644 index 00000000000..5dd3455adce --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/cluster.go @@ -0,0 +1,143 @@ +package protocol + +import ( + "fmt" + "sort" + "strings" + "text/tabwriter" +) + +type Cluster struct { + ClusterID string + Controller int32 + Brokers map[int32]Broker + Topics map[string]Topic +} + +func (c Cluster) BrokerIDs() []int32 { + brokerIDs := make([]int32, 0, len(c.Brokers)) + for id := range c.Brokers { + brokerIDs = append(brokerIDs, id) + } + sort.Slice(brokerIDs, func(i, j int) bool { + return brokerIDs[i] < brokerIDs[j] + }) + return brokerIDs +} + +func (c Cluster) TopicNames() []string { + topicNames := make([]string, 0, len(c.Topics)) + for name := range c.Topics { + topicNames = append(topicNames, name) + } + sort.Strings(topicNames) + return topicNames +} + +func (c Cluster) IsZero() bool { + return c.ClusterID == "" && c.Controller == 0 && len(c.Brokers) == 0 && len(c.Topics) == 0 +} + +func (c Cluster) Format(w fmt.State, _ rune) { + tw := new(tabwriter.Writer) + fmt.Fprintf(w, "CLUSTER: %q\n\n", c.ClusterID) + + tw.Init(w, 0, 8, 2, ' ', 0) + fmt.Fprint(tw, " BROKER\tHOST\tPORT\tRACK\tCONTROLLER\n") + + for _, id := range c.BrokerIDs() { + broker := c.Brokers[id] + fmt.Fprintf(tw, " %d\t%s\t%d\t%s\t%t\n", broker.ID, broker.Host, broker.Port, broker.Rack, broker.ID == c.Controller) + } + + tw.Flush() + fmt.Fprintln(w) + + tw.Init(w, 0, 8, 2, ' ', 0) + fmt.Fprint(tw, " TOPIC\tPARTITIONS\tBROKERS\n") + topicNames := c.TopicNames() + brokers := make(map[int32]struct{}, len(c.Brokers)) + brokerIDs := make([]int32, 0, len(c.Brokers)) + + for _, name := range topicNames { + topic := c.Topics[name] + + for _, p := range topic.Partitions { + for _, id := range p.Replicas { + brokers[id] = struct{}{} + } + } + + for id := range brokers { + brokerIDs = append(brokerIDs, id) + } + + fmt.Fprintf(tw, " %s\t%d\t%s\n", topic.Name, len(topic.Partitions), formatBrokerIDs(brokerIDs, -1)) + + for id := range brokers { + delete(brokers, id) + } + + brokerIDs = brokerIDs[:0] + } + + tw.Flush() + fmt.Fprintln(w) + + if w.Flag('+') { + for _, name := range topicNames { + fmt.Fprintf(w, " TOPIC: %q\n\n", name) + + tw.Init(w, 0, 8, 2, ' ', 0) + fmt.Fprint(tw, " PARTITION\tREPLICAS\tISR\tOFFLINE\n") + + for _, p := range c.Topics[name].Partitions { + fmt.Fprintf(tw, " %d\t%s\t%s\t%s\n", p.ID, + formatBrokerIDs(p.Replicas, -1), + formatBrokerIDs(p.ISR, p.Leader), + formatBrokerIDs(p.Offline, -1), + ) + } + + tw.Flush() + fmt.Fprintln(w) + } + } +} + 
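+// Usage note: because Cluster implements fmt.Formatter (see Format above), a +// metadata snapshot can be printed directly, e.g. fmt.Printf("%+v", cluster); +// the '+' flag additionally renders the per-topic partition tables. +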
+func formatBrokerIDs(brokerIDs []int32, leader int32) string { + if len(brokerIDs) == 0 { + return "" + } + + if len(brokerIDs) == 1 { + return itoa(brokerIDs[0]) + } + + sort.Slice(brokerIDs, func(i, j int) bool { + id1 := brokerIDs[i] + id2 := brokerIDs[j] + + if id1 == leader { + return true + } + + if id2 == leader { + return false + } + + return id1 < id2 + }) + + brokerNames := make([]string, len(brokerIDs)) + + for i, id := range brokerIDs { + brokerNames[i] = itoa(id) + } + + return strings.Join(brokerNames, ",") +} + +var ( + _ fmt.Formatter = Cluster{} +) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/conn.go b/vendor/github.com/segmentio/kafka-go/protocol/conn.go new file mode 100644 index 00000000000..d08a577f63f --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/conn.go @@ -0,0 +1,100 @@ +package protocol + +import ( + "bufio" + "fmt" + "net" + "sync/atomic" + "time" +) + +type Conn struct { + buffer *bufio.Reader + conn net.Conn + clientID string + idgen int32 + versions atomic.Value // map[ApiKey]int16 +} + +func NewConn(conn net.Conn, clientID string) *Conn { + return &Conn{ + buffer: bufio.NewReader(conn), + conn: conn, + clientID: clientID, + } +} + +func (c *Conn) String() string { + return fmt.Sprintf("kafka://%s@%s->%s", c.clientID, c.LocalAddr(), c.RemoteAddr()) +} + +func (c *Conn) Close() error { + return c.conn.Close() +} + +func (c *Conn) Discard(n int) (int, error) { + return c.buffer.Discard(n) +} + +func (c *Conn) Peek(n int) ([]byte, error) { + return c.buffer.Peek(n) +} + +func (c *Conn) Read(b []byte) (int, error) { + return c.buffer.Read(b) +} + +func (c *Conn) Write(b []byte) (int, error) { + return c.conn.Write(b) +} + +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *Conn) SetDeadline(t time.Time) error { + return c.conn.SetDeadline(t) +} + +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +func (c *Conn) SetWriteDeadline(t time.Time) error { + return c.conn.SetWriteDeadline(t) +} + +func (c *Conn) SetVersions(versions map[ApiKey]int16) { + connVersions := make(map[ApiKey]int16, len(versions)) + + for k, v := range versions { + connVersions[k] = v + } + + c.versions.Store(connVersions) +} + +func (c *Conn) RoundTrip(msg Message) (Message, error) { + correlationID := atomic.AddInt32(&c.idgen, +1) + versions, _ := c.versions.Load().(map[ApiKey]int16) + apiVersion := versions[msg.ApiKey()] + + if p, _ := msg.(PreparedMessage); p != nil { + p.Prepare(apiVersion) + } + + if raw, ok := msg.(RawExchanger); ok && raw.Required(versions) { + return raw.RawExchange(c) + } + + return RoundTrip(c, apiVersion, correlationID, c.clientID, msg) +} + +var ( + _ net.Conn = (*Conn)(nil) + _ bufferedReader = (*Conn)(nil) +) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/consumer/consumer.go b/vendor/github.com/segmentio/kafka-go/protocol/consumer/consumer.go new file mode 100644 index 00000000000..ab643105d8a --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/consumer/consumer.go @@ -0,0 +1,21 @@ +package consumer + +const MaxVersionSupported = 1 + +type Subscription struct { + Version int16 `kafka:"min=v0,max=v1"` + Topics []string `kafka:"min=v0,max=v1"` + UserData []byte `kafka:"min=v0,max=v1,nullable"` + OwnedPartitions []TopicPartition `kafka:"min=v1,max=v1"` +} + +type Assignment struct { + Version int16 
`kafka:"min=v0,max=v1"` + AssignedPartitions []TopicPartition `kafka:"min=v0,max=v1"` + UserData []byte `kafka:"min=v0,max=v1,nullable"` +} + +type TopicPartition struct { + Topic string `kafka:"min=v0,max=v1"` + Partitions []int32 `kafka:"min=v0,max=v1"` +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/createacls/createacls.go b/vendor/github.com/segmentio/kafka-go/protocol/createacls/createacls.go new file mode 100644 index 00000000000..893be44dd35 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/createacls/createacls.go @@ -0,0 +1,49 @@ +package createacls + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v2,max=v2,tag"` + + Creations []RequestACLs `kafka:"min=v0,max=v2"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.CreateAcls } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type RequestACLs struct { + ResourceType int8 `kafka:"min=v0,max=v2"` + ResourceName string `kafka:"min=v0,max=v2"` + ResourcePatternType int8 `kafka:"min=v0,max=v2"` + Principal string `kafka:"min=v0,max=v2"` + Host string `kafka:"min=v0,max=v2"` + Operation int8 `kafka:"min=v0,max=v2"` + PermissionType int8 `kafka:"min=v0,max=v2"` +} + +type Response struct { + // We need at least one tagged field to indicate that v2+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v2,max=v2,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v2"` + Results []ResponseACLs `kafka:"min=v0,max=v2"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.CreateAcls } + +type ResponseACLs struct { + ErrorCode int16 `kafka:"min=v0,max=v2"` + ErrorMessage string `kafka:"min=v0,max=v2,nullable"` +} + +var _ protocol.BrokerMessage = (*Request)(nil) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/createpartitions/createpartitions.go b/vendor/github.com/segmentio/kafka-go/protocol/createpartitions/createpartitions.go new file mode 100644 index 00000000000..4b86b4408a5 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/createpartitions/createpartitions.go @@ -0,0 +1,46 @@ +package createpartitions + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_CreatePartitions. +// TODO: Support version 2. 
+type Request struct { + Topics []RequestTopic `kafka:"min=v0,max=v1"` + TimeoutMs int32 `kafka:"min=v0,max=v1"` + ValidateOnly bool `kafka:"min=v0,max=v1"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.CreatePartitions } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type RequestTopic struct { + Name string `kafka:"min=v0,max=v1"` + Count int32 `kafka:"min=v0,max=v1"` + Assignments []RequestAssignment `kafka:"min=v0,max=v1,nullable"` +} + +type RequestAssignment struct { + BrokerIDs []int32 `kafka:"min=v0,max=v1"` +} + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v0,max=v1"` + Results []ResponseResult `kafka:"min=v0,max=v1"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.CreatePartitions } + +type ResponseResult struct { + Name string `kafka:"min=v0,max=v1"` + ErrorCode int16 `kafka:"min=v0,max=v1"` + ErrorMessage string `kafka:"min=v0,max=v1,nullable"` +} + +var _ protocol.BrokerMessage = (*Request)(nil) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/createtopics/createtopics.go b/vendor/github.com/segmentio/kafka-go/protocol/createtopics/createtopics.go new file mode 100644 index 00000000000..62c597fb1ea --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/createtopics/createtopics.go @@ -0,0 +1,74 @@ +package createtopics + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that v5+ uses "flexible" + // messages. + _ struct{} `kafka:"min=v5,max=v5,tag"` + + Topics []RequestTopic `kafka:"min=v0,max=v5"` + TimeoutMs int32 `kafka:"min=v0,max=v5"` + ValidateOnly bool `kafka:"min=v1,max=v5"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.CreateTopics } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type RequestTopic struct { + Name string `kafka:"min=v0,max=v5"` + NumPartitions int32 `kafka:"min=v0,max=v5"` + ReplicationFactor int16 `kafka:"min=v0,max=v5"` + Assignments []RequestAssignment `kafka:"min=v0,max=v5"` + Configs []RequestConfig `kafka:"min=v0,max=v5"` +} + +type RequestAssignment struct { + PartitionIndex int32 `kafka:"min=v0,max=v5"` + BrokerIDs []int32 `kafka:"min=v0,max=v5"` +} + +type RequestConfig struct { + Name string `kafka:"min=v0,max=v5"` + Value string `kafka:"min=v0,max=v5,nullable"` +} + +type Response struct { + // We need at least one tagged field to indicate that v5+ uses "flexible" + // messages. 
+ _ struct{} `kafka:"min=v5,max=v5,tag"` + + ThrottleTimeMs int32 `kafka:"min=v2,max=v5"` + Topics []ResponseTopic `kafka:"min=v0,max=v5"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.CreateTopics } + +type ResponseTopic struct { + Name string `kafka:"min=v0,max=v5"` + ErrorCode int16 `kafka:"min=v0,max=v5"` + ErrorMessage string `kafka:"min=v1,max=v5,nullable"` + NumPartitions int32 `kafka:"min=v5,max=v5"` + ReplicationFactor int16 `kafka:"min=v5,max=v5"` + + Configs []ResponseTopicConfig `kafka:"min=v5,max=v5"` +} + +type ResponseTopicConfig struct { + Name string `kafka:"min=v5,max=v5"` + Value string `kafka:"min=v5,max=v5,nullable"` + ReadOnly bool `kafka:"min=v5,max=v5"` + ConfigSource int8 `kafka:"min=v5,max=v5"` + IsSensitive bool `kafka:"min=v5,max=v5"` +} + +var ( + _ protocol.BrokerMessage = (*Request)(nil) +) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/decode.go b/vendor/github.com/segmentio/kafka-go/protocol/decode.go new file mode 100644 index 00000000000..5bf61ffa420 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/decode.go @@ -0,0 +1,537 @@ +package protocol + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "math" + "reflect" + "sync" + "sync/atomic" +) + +type discarder interface { + Discard(int) (int, error) +} + +type decoder struct { + reader io.Reader + remain int + buffer [8]byte + err error + table *crc32.Table + crc32 uint32 +} + +func (d *decoder) Reset(r io.Reader, n int) { + d.reader = r + d.remain = n + d.buffer = [8]byte{} + d.err = nil + d.table = nil + d.crc32 = 0 +} + +func (d *decoder) Read(b []byte) (int, error) { + if d.err != nil { + return 0, d.err + } + if d.remain == 0 { + return 0, io.EOF + } + if len(b) > d.remain { + b = b[:d.remain] + } + n, err := d.reader.Read(b) + if n > 0 && d.table != nil { + d.crc32 = crc32.Update(d.crc32, d.table, b[:n]) + } + d.remain -= n + return n, err +} + +func (d *decoder) ReadByte() (byte, error) { + c := d.readByte() + return c, d.err +} + +func (d *decoder) done() bool { + return d.remain == 0 || d.err != nil +} + +func (d *decoder) setCRC(table *crc32.Table) { + d.table, d.crc32 = table, 0 +} + +func (d *decoder) decodeBool(v value) { + v.setBool(d.readBool()) +} + +func (d *decoder) decodeInt8(v value) { + v.setInt8(d.readInt8()) +} + +func (d *decoder) decodeInt16(v value) { + v.setInt16(d.readInt16()) +} + +func (d *decoder) decodeInt32(v value) { + v.setInt32(d.readInt32()) +} + +func (d *decoder) decodeInt64(v value) { + v.setInt64(d.readInt64()) +} + +func (d *decoder) decodeFloat64(v value) { + v.setFloat64(d.readFloat64()) +} + +func (d *decoder) decodeString(v value) { + v.setString(d.readString()) +} + +func (d *decoder) decodeCompactString(v value) { + v.setString(d.readCompactString()) +} + +func (d *decoder) decodeBytes(v value) { + v.setBytes(d.readBytes()) +} + +func (d *decoder) decodeCompactBytes(v value) { + v.setBytes(d.readCompactBytes()) +} + +func (d *decoder) decodeArray(v value, elemType reflect.Type, decodeElem decodeFunc) { + if n := d.readInt32(); n < 0 { + v.setArray(array{}) + } else { + a := makeArray(elemType, int(n)) + for i := 0; i < int(n) && d.remain > 0; i++ { + decodeElem(d, a.index(i)) + } + v.setArray(a) + } +} + +func (d *decoder) decodeCompactArray(v value, elemType reflect.Type, decodeElem decodeFunc) { + if n := d.readUnsignedVarInt(); n < 1 { + v.setArray(array{}) + } else { + a := makeArray(elemType, int(n-1)) + for i := 0; i < int(n-1) && d.remain > 
0; i++ { + decodeElem(d, a.index(i)) + } + v.setArray(a) + } +} + +func (d *decoder) discardAll() { + d.discard(d.remain) +} + +func (d *decoder) discard(n int) { + if n > d.remain { + n = d.remain + } + var err error + if r, _ := d.reader.(discarder); r != nil { + n, err = r.Discard(n) + d.remain -= n + } else { + _, err = io.Copy(ioutil.Discard, d) + } + d.setError(err) +} + +func (d *decoder) read(n int) []byte { + b := make([]byte, n) + n, err := io.ReadFull(d, b) + b = b[:n] + d.setError(err) + return b +} + +func (d *decoder) writeTo(w io.Writer, n int) { + limit := d.remain + if n < limit { + d.remain = n + } + c, err := io.Copy(w, d) + if int(c) < n && err == nil { + err = io.ErrUnexpectedEOF + } + d.remain = limit - int(c) + d.setError(err) +} + +func (d *decoder) setError(err error) { + if d.err == nil && err != nil { + d.err = err + d.discardAll() + } +} + +func (d *decoder) readFull(b []byte) bool { + n, err := io.ReadFull(d, b) + d.setError(err) + return n == len(b) +} + +func (d *decoder) readByte() byte { + if d.readFull(d.buffer[:1]) { + return d.buffer[0] + } + return 0 +} + +func (d *decoder) readBool() bool { + return d.readByte() != 0 +} + +func (d *decoder) readInt8() int8 { + if d.readFull(d.buffer[:1]) { + return readInt8(d.buffer[:1]) + } + return 0 +} + +func (d *decoder) readInt16() int16 { + if d.readFull(d.buffer[:2]) { + return readInt16(d.buffer[:2]) + } + return 0 +} + +func (d *decoder) readInt32() int32 { + if d.readFull(d.buffer[:4]) { + return readInt32(d.buffer[:4]) + } + return 0 +} + +func (d *decoder) readInt64() int64 { + if d.readFull(d.buffer[:8]) { + return readInt64(d.buffer[:8]) + } + return 0 +} + +func (d *decoder) readFloat64() float64 { + if d.readFull(d.buffer[:8]) { + return readFloat64(d.buffer[:8]) + } + return 0 +} + +func (d *decoder) readString() string { + if n := d.readInt16(); n < 0 { + return "" + } else { + return bytesToString(d.read(int(n))) + } +} + +func (d *decoder) readVarString() string { + if n := d.readVarInt(); n < 0 { + return "" + } else { + return bytesToString(d.read(int(n))) + } +} + +func (d *decoder) readCompactString() string { + if n := d.readUnsignedVarInt(); n < 1 { + return "" + } else { + return bytesToString(d.read(int(n - 1))) + } +} + +func (d *decoder) readBytes() []byte { + if n := d.readInt32(); n < 0 { + return nil + } else { + return d.read(int(n)) + } +} + +func (d *decoder) readVarBytes() []byte { + if n := d.readVarInt(); n < 0 { + return nil + } else { + return d.read(int(n)) + } +} + +func (d *decoder) readCompactBytes() []byte { + if n := d.readUnsignedVarInt(); n < 1 { + return nil + } else { + return d.read(int(n - 1)) + } +} + +func (d *decoder) readVarInt() int64 { + n := 11 // varints are at most 11 bytes + + if n > d.remain { + n = d.remain + } + + x := uint64(0) + s := uint(0) + + for n > 0 { + b := d.readByte() + + if (b & 0x80) == 0 { + x |= uint64(b) << s + return int64(x>>1) ^ -(int64(x) & 1) + } + + x |= uint64(b&0x7f) << s + s += 7 + n-- + } + + d.setError(fmt.Errorf("cannot decode varint from input stream")) + return 0 +} + +func (d *decoder) readUnsignedVarInt() uint64 { + n := 11 // varints are at most 11 bytes + + if n > d.remain { + n = d.remain + } + + x := uint64(0) + s := uint(0) + + for n > 0 { + b := d.readByte() + + if (b & 0x80) == 0 { + x |= uint64(b) << s + return x + } + + x |= uint64(b&0x7f) << s + s += 7 + n-- + } + + d.setError(fmt.Errorf("cannot decode unsigned varint from input stream")) + return 0 +} + +type decodeFunc func(*decoder, value) + +var ( + _ 
io.Reader = (*decoder)(nil) + _ io.ByteReader = (*decoder)(nil) + + readerFrom = reflect.TypeOf((*io.ReaderFrom)(nil)).Elem() +) + +func decodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) decodeFunc { + if reflect.PtrTo(typ).Implements(readerFrom) { + return readerDecodeFuncOf(typ) + } + switch typ.Kind() { + case reflect.Bool: + return (*decoder).decodeBool + case reflect.Int8: + return (*decoder).decodeInt8 + case reflect.Int16: + return (*decoder).decodeInt16 + case reflect.Int32: + return (*decoder).decodeInt32 + case reflect.Int64: + return (*decoder).decodeInt64 + case reflect.Float64: + return (*decoder).decodeFloat64 + case reflect.String: + return stringDecodeFuncOf(flexible, tag) + case reflect.Struct: + return structDecodeFuncOf(typ, version, flexible) + case reflect.Slice: + if typ.Elem().Kind() == reflect.Uint8 { // []byte + return bytesDecodeFuncOf(flexible, tag) + } + return arrayDecodeFuncOf(typ, version, flexible, tag) + default: + panic("unsupported type: " + typ.String()) + } +} + +func stringDecodeFuncOf(flexible bool, tag structTag) decodeFunc { + if flexible { + // In flexible messages, all strings are compact + return (*decoder).decodeCompactString + } + return (*decoder).decodeString +} + +func bytesDecodeFuncOf(flexible bool, tag structTag) decodeFunc { + if flexible { + // In flexible messages, all arrays are compact + return (*decoder).decodeCompactBytes + } + return (*decoder).decodeBytes +} + +func structDecodeFuncOf(typ reflect.Type, version int16, flexible bool) decodeFunc { + type field struct { + decode decodeFunc + index index + tagID int + } + + var fields []field + taggedFields := map[int]*field{} + + forEachStructField(typ, func(typ reflect.Type, index index, tag string) { + forEachStructTag(tag, func(tag structTag) bool { + if tag.MinVersion <= version && version <= tag.MaxVersion { + f := field{ + decode: decodeFuncOf(typ, version, flexible, tag), + index: index, + tagID: tag.TagID, + } + + if tag.TagID < -1 { + // Normal required field + fields = append(fields, f) + } else { + // Optional tagged field (flexible messages only) + taggedFields[tag.TagID] = &f + } + return false + } + return true + }) + }) + + return func(d *decoder, v value) { + for i := range fields { + f := &fields[i] + f.decode(d, v.fieldByIndex(f.index)) + } + + if flexible { + // See https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields + // for details of tag buffers in "flexible" messages. 
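+ // The tag buffer begins with an unsigned varint count of tagged fields, + // followed by (tag ID, size, payload) triples; tags this struct does not + // know about are skipped by reading exactly size bytes.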
+ n := int(d.readUnsignedVarInt()) + + for i := 0; i < n; i++ { + tagID := int(d.readUnsignedVarInt()) + size := int(d.readUnsignedVarInt()) + + f, ok := taggedFields[tagID] + if ok { + f.decode(d, v.fieldByIndex(f.index)) + } else { + d.read(size) + } + } + } + } +} + +func arrayDecodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) decodeFunc { + elemType := typ.Elem() + elemFunc := decodeFuncOf(elemType, version, flexible, tag) + if flexible { + // In flexible messages, all arrays are compact + return func(d *decoder, v value) { d.decodeCompactArray(v, elemType, elemFunc) } + } + + return func(d *decoder, v value) { d.decodeArray(v, elemType, elemFunc) } +} + +func readerDecodeFuncOf(typ reflect.Type) decodeFunc { + typ = reflect.PtrTo(typ) + return func(d *decoder, v value) { + if d.err == nil { + _, err := v.iface(typ).(io.ReaderFrom).ReadFrom(d) + if err != nil { + d.setError(err) + } + } + } +} + +func readInt8(b []byte) int8 { + return int8(b[0]) +} + +func readInt16(b []byte) int16 { + return int16(binary.BigEndian.Uint16(b)) +} + +func readInt32(b []byte) int32 { + return int32(binary.BigEndian.Uint32(b)) +} + +func readInt64(b []byte) int64 { + return int64(binary.BigEndian.Uint64(b)) +} + +func readFloat64(b []byte) float64 { + return math.Float64frombits(binary.BigEndian.Uint64(b)) +} + +func Unmarshal(data []byte, version int16, value interface{}) error { + typ := elemTypeOf(value) + cache, _ := unmarshalers.Load().(map[versionedType]decodeFunc) + key := versionedType{typ: typ, version: version} + decode := cache[key] + + if decode == nil { + decode = decodeFuncOf(reflect.TypeOf(value).Elem(), version, false, structTag{ + MinVersion: -1, + MaxVersion: -1, + TagID: -2, + Compact: true, + Nullable: true, + }) + + newCache := make(map[versionedType]decodeFunc, len(cache)+1) + newCache[key] = decode + + for typ, fun := range cache { + newCache[typ] = fun + } + + unmarshalers.Store(newCache) + } + + d, _ := decoders.Get().(*decoder) + if d == nil { + d = &decoder{reader: bytes.NewReader(nil)} + } + + d.remain = len(data) + r, _ := d.reader.(*bytes.Reader) + r.Reset(data) + + defer func() { + r.Reset(nil) + d.Reset(r, 0) + decoders.Put(d) + }() + + decode(d, valueOf(value)) + return dontExpectEOF(d.err) +} + +var ( + decoders sync.Pool // *decoder + unmarshalers atomic.Value // map[versionedType]decodeFunc +) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/deletegroups/deletegroups.go b/vendor/github.com/segmentio/kafka-go/protocol/deletegroups/deletegroups.go new file mode 100644 index 00000000000..759dfc2feff --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/deletegroups/deletegroups.go @@ -0,0 +1,45 @@ +package deletegroups + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v2,max=v2,tag"` + + GroupIDs []string `kafka:"min=v0,max=v2"` +} + +func (r *Request) Group() string { + // use first group to determine group coordinator + if len(r.GroupIDs) > 0 { + return r.GroupIDs[0] + } + return "" +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.DeleteGroups } + +var ( + _ protocol.GroupMessage = (*Request)(nil) +) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
+ _ struct{} `kafka:"min=v2,max=v2,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v2"` + Responses []ResponseGroup `kafka:"min=v0,max=v2"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.DeleteGroups } + +type ResponseGroup struct { + GroupID string `kafka:"min=v0,max=v2"` + ErrorCode int16 `kafka:"min=v0,max=v2"` +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/deletetopics/deletetopics.go b/vendor/github.com/segmentio/kafka-go/protocol/deletetopics/deletetopics.go new file mode 100644 index 00000000000..3af5a001447 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/deletetopics/deletetopics.go @@ -0,0 +1,34 @@ +package deletetopics + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + TopicNames []string `kafka:"min=v0,max=v3"` + TimeoutMs int32 `kafka:"min=v0,max=v3"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.DeleteTopics } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v1,max=v3"` + Responses []ResponseTopic `kafka:"min=v0,max=v3"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.DeleteTopics } + +type ResponseTopic struct { + Name string `kafka:"min=v0,max=v3"` + ErrorCode int16 `kafka:"min=v0,max=v3"` +} + +var ( + _ protocol.BrokerMessage = (*Request)(nil) +) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/describeclientquotas/describeclientquotas.go b/vendor/github.com/segmentio/kafka-go/protocol/describeclientquotas/describeclientquotas.go new file mode 100644 index 00000000000..e137776bf88 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/describeclientquotas/describeclientquotas.go @@ -0,0 +1,68 @@ +package describeclientquotas + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + Components []Component `kafka:"min=v0,max=v1"` + Strict bool `kafka:"min=v0,max=v1"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeClientQuotas } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type Component struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + EntityType string `kafka:"min=v0,max=v1"` + MatchType int8 `kafka:"min=v0,max=v1"` + Match string `kafka:"min=v0,max=v1,nullable"` +} + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + ThrottleTimeMs int32 `kafka:"min=v0,max=v1"` + ErrorCode int16 `kafka:"min=v0,max=v1"` + ErrorMessage string `kafka:"min=v0,max=v0,nullable|min=v1,max=v1,nullable,compact"` + Entries []ResponseQuotas `kafka:"min=v0,max=v1"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeClientQuotas } + +type Entity struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
+ _ struct{} `kafka:"min=v1,max=v1,tag"` + EntityType string `kafka:"min=v0,max=v0|min=v1,max=v1,compact"` + EntityName string `kafka:"min=v0,max=v0,nullable|min=v1,max=v1,nullable,compact"` +} + +type Value struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + Key string `kafka:"min=v0,max=v0|min=v1,max=v1,compact"` + Value float64 `kafka:"min=v0,max=v1"` +} + +type ResponseQuotas struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v1,max=v1,tag"` + Entities []Entity `kafka:"min=v0,max=v1"` + Values []Value `kafka:"min=v0,max=v1"` +} + +var _ protocol.BrokerMessage = (*Request)(nil) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/describeconfigs/describeconfigs.go b/vendor/github.com/segmentio/kafka-go/protocol/describeconfigs/describeconfigs.go new file mode 100644 index 00000000000..09c91841ffd --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/describeconfigs/describeconfigs.go @@ -0,0 +1,129 @@ +package describeconfigs + +import ( + "strconv" + + "github.com/segmentio/kafka-go/protocol" +) + +const ( + resourceTypeBroker int8 = 4 +) + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_DescribeConfigs +type Request struct { + Resources []RequestResource `kafka:"min=v0,max=v3"` + IncludeSynonyms bool `kafka:"min=v1,max=v3"` + IncludeDocumentation bool `kafka:"min=v3,max=v3"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeConfigs } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + // Broker metadata requests must be sent to the associated broker + for _, resource := range r.Resources { + if resource.ResourceType == resourceTypeBroker { + brokerID, err := strconv.Atoi(resource.ResourceName) + if err != nil { + return protocol.Broker{}, err + } + + return cluster.Brokers[int32(brokerID)], nil + } + } + + return cluster.Brokers[cluster.Controller], nil +} + +func (r *Request) Split(cluster protocol.Cluster) ( + []protocol.Message, + protocol.Merger, + error, +) { + messages := []protocol.Message{} + topicsMessage := Request{} + + for _, resource := range r.Resources { + // Split out broker requests to separate brokers + if resource.ResourceType == resourceTypeBroker { + messages = append(messages, &Request{ + Resources: []RequestResource{resource}, + }) + } else { + topicsMessage.Resources = append( + topicsMessage.Resources, resource, + ) + } + } + + if len(topicsMessage.Resources) > 0 { + messages = append(messages, &topicsMessage) + } + + return messages, new(Response), nil +} + +type RequestResource struct { + ResourceType int8 `kafka:"min=v0,max=v3"` + ResourceName string `kafka:"min=v0,max=v3"` + ConfigNames []string `kafka:"min=v0,max=v3,nullable"` +} + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v0,max=v3"` + Resources []ResponseResource `kafka:"min=v0,max=v3"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeConfigs } + +func (r *Response) Merge(requests []protocol.Message, results []interface{}) ( + protocol.Message, + error, +) { + response := &Response{} + + for _, result := range results { + m, err := protocol.Result(result) + if err != nil { + return nil, err + } + response.Resources = append( + response.Resources, + m.(*Response).Resources..., + ) + } + + 
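// The per-broker partial responses have all been merged at this point. + 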
return response, nil +} + +type ResponseResource struct { + ErrorCode int16 `kafka:"min=v0,max=v3"` + ErrorMessage string `kafka:"min=v0,max=v3,nullable"` + ResourceType int8 `kafka:"min=v0,max=v3"` + ResourceName string `kafka:"min=v0,max=v3"` + ConfigEntries []ResponseConfigEntry `kafka:"min=v0,max=v3"` +} + +type ResponseConfigEntry struct { + ConfigName string `kafka:"min=v0,max=v3"` + ConfigValue string `kafka:"min=v0,max=v3,nullable"` + ReadOnly bool `kafka:"min=v0,max=v3"` + IsDefault bool `kafka:"min=v0,max=v0"` + ConfigSource int8 `kafka:"min=v1,max=v3"` + IsSensitive bool `kafka:"min=v0,max=v3"` + ConfigSynonyms []ResponseConfigSynonym `kafka:"min=v1,max=v3"` + ConfigType int8 `kafka:"min=v3,max=v3"` + ConfigDocumentation string `kafka:"min=v3,max=v3,nullable"` +} + +type ResponseConfigSynonym struct { + ConfigName string `kafka:"min=v1,max=v3"` + ConfigValue string `kafka:"min=v1,max=v3,nullable"` + ConfigSource int8 `kafka:"min=v1,max=v3"` +} + +var _ protocol.BrokerMessage = (*Request)(nil) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/describegroups/describegroups.go b/vendor/github.com/segmentio/kafka-go/protocol/describegroups/describegroups.go new file mode 100644 index 00000000000..a4d12048a0c --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/describegroups/describegroups.go @@ -0,0 +1,85 @@ +package describegroups + +import ( + "github.com/segmentio/kafka-go/protocol" +) + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_DescribeGroups +type Request struct { + Groups []string `kafka:"min=v0,max=v4"` + IncludeAuthorizedOperations bool `kafka:"min=v3,max=v4"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.DescribeGroups } + +func (r *Request) Group() string { + return r.Groups[0] +} + +func (r *Request) Split(cluster protocol.Cluster) ( + []protocol.Message, + protocol.Merger, + error, +) { + messages := []protocol.Message{} + + // Split requests by group since they'll need to go to different coordinators. 
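+ // Each group may be owned by a different coordinator, so a request naming + // several groups fans out into one request per group; the responses are + // stitched back together by Merge below.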
+ for _, group := range r.Groups { + messages = append( + messages, + &Request{ + Groups: []string{group}, + IncludeAuthorizedOperations: r.IncludeAuthorizedOperations, + }, + ) + } + + return messages, new(Response), nil +} + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v1,max=v4"` + Groups []ResponseGroup `kafka:"min=v0,max=v4"` +} + +type ResponseGroup struct { + ErrorCode int16 `kafka:"min=v0,max=v4"` + GroupID string `kafka:"min=v0,max=v4"` + GroupState string `kafka:"min=v0,max=v4"` + ProtocolType string `kafka:"min=v0,max=v4"` + ProtocolData string `kafka:"min=v0,max=v4"` + Members []ResponseGroupMember `kafka:"min=v0,max=v4"` + AuthorizedOperations int32 `kafka:"min=v3,max=v4"` +} + +type ResponseGroupMember struct { + MemberID string `kafka:"min=v0,max=v4"` + GroupInstanceID string `kafka:"min=v4,max=v4,nullable"` + ClientID string `kafka:"min=v0,max=v4"` + ClientHost string `kafka:"min=v0,max=v4"` + MemberMetadata []byte `kafka:"min=v0,max=v4"` + MemberAssignment []byte `kafka:"min=v0,max=v4"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.DescribeGroups } + +func (r *Response) Merge(requests []protocol.Message, results []interface{}) ( + protocol.Message, + error, +) { + response := &Response{} + + for _, result := range results { + m, err := protocol.Result(result) + if err != nil { + return nil, err + } + response.Groups = append(response.Groups, m.(*Response).Groups...) + } + + return response, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/electleaders/electleaders.go b/vendor/github.com/segmentio/kafka-go/protocol/electleaders/electleaders.go new file mode 100644 index 00000000000..cd36ff5d70e --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/electleaders/electleaders.go @@ -0,0 +1,44 @@ +package electleaders + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_ElectLeaders +type Request struct { + ElectionType int8 `kafka:"min=v1,max=v1"` + TopicPartitions []RequestTopicPartitions `kafka:"min=v0,max=v1"` + TimeoutMs int32 `kafka:"min=v0,max=v1"` +} + +type RequestTopicPartitions struct { + Topic string `kafka:"min=v0,max=v1"` + PartitionIDs []int32 `kafka:"min=v0,max=v1"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.ElectLeaders } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[cluster.Controller], nil +} + +type Response struct { + ThrottleTime int32 `kafka:"min=v0,max=v1"` + ErrorCode int16 `kafka:"min=v1,max=v1"` + ReplicaElectionResults []ResponseReplicaElectionResult `kafka:"min=v0,max=v1"` +} + +type ResponseReplicaElectionResult struct { + Topic string `kafka:"min=v0,max=v1"` + PartitionResults []ResponsePartitionResult `kafka:"min=v0,max=v1"` +} + +type ResponsePartitionResult struct { + PartitionID int32 `kafka:"min=v0,max=v1"` + ErrorCode int16 `kafka:"min=v0,max=v1"` + ErrorMessage string `kafka:"min=v0,max=v1,nullable"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.ElectLeaders } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/encode.go b/vendor/github.com/segmentio/kafka-go/protocol/encode.go new file mode 100644 index 00000000000..bd1633671c5 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/encode.go @@ -0,0 +1,606 @@ +package protocol + +import ( + "bytes" + 
"encoding/binary" + "fmt" + "hash/crc32" + "io" + "math" + "reflect" + "sync" + "sync/atomic" +) + +type encoder struct { + writer io.Writer + err error + table *crc32.Table + crc32 uint32 + buffer [32]byte +} + +type encoderChecksum struct { + reader io.Reader + encoder *encoder +} + +func (e *encoderChecksum) Read(b []byte) (int, error) { + n, err := e.reader.Read(b) + if n > 0 { + e.encoder.update(b[:n]) + } + return n, err +} + +func (e *encoder) Reset(w io.Writer) { + e.writer = w + e.err = nil + e.table = nil + e.crc32 = 0 + e.buffer = [32]byte{} +} + +func (e *encoder) ReadFrom(r io.Reader) (int64, error) { + if e.table != nil { + r = &encoderChecksum{ + reader: r, + encoder: e, + } + } + return io.Copy(e.writer, r) +} + +func (e *encoder) Write(b []byte) (int, error) { + if e.err != nil { + return 0, e.err + } + n, err := e.writer.Write(b) + if n > 0 { + e.update(b[:n]) + } + if err != nil { + e.err = err + } + return n, err +} + +func (e *encoder) WriteByte(b byte) error { + e.buffer[0] = b + _, err := e.Write(e.buffer[:1]) + return err +} + +func (e *encoder) WriteString(s string) (int, error) { + // This implementation is an optimization to avoid the heap allocation that + // would occur when converting the string to a []byte to call crc32.Update. + // + // Strings are rarely long in the kafka protocol, so the use of a 32 byte + // buffer is a good comprise between keeping the encoder value small and + // limiting the number of calls to Write. + // + // We introduced this optimization because memory profiles on the benchmarks + // showed that most heap allocations were caused by this code path. + n := 0 + + for len(s) != 0 { + c := copy(e.buffer[:], s) + w, err := e.Write(e.buffer[:c]) + n += w + if err != nil { + return n, err + } + s = s[c:] + } + + return n, nil +} + +func (e *encoder) setCRC(table *crc32.Table) { + e.table, e.crc32 = table, 0 +} + +func (e *encoder) update(b []byte) { + if e.table != nil { + e.crc32 = crc32.Update(e.crc32, e.table, b) + } +} + +func (e *encoder) encodeBool(v value) { + b := int8(0) + if v.bool() { + b = 1 + } + e.writeInt8(b) +} + +func (e *encoder) encodeInt8(v value) { + e.writeInt8(v.int8()) +} + +func (e *encoder) encodeInt16(v value) { + e.writeInt16(v.int16()) +} + +func (e *encoder) encodeInt32(v value) { + e.writeInt32(v.int32()) +} + +func (e *encoder) encodeInt64(v value) { + e.writeInt64(v.int64()) +} + +func (e *encoder) encodeFloat64(v value) { + e.writeFloat64(v.float64()) +} + +func (e *encoder) encodeString(v value) { + e.writeString(v.string()) +} + +func (e *encoder) encodeCompactString(v value) { + e.writeCompactString(v.string()) +} + +func (e *encoder) encodeNullString(v value) { + e.writeNullString(v.string()) +} + +func (e *encoder) encodeCompactNullString(v value) { + e.writeCompactNullString(v.string()) +} + +func (e *encoder) encodeBytes(v value) { + e.writeBytes(v.bytes()) +} + +func (e *encoder) encodeCompactBytes(v value) { + e.writeCompactBytes(v.bytes()) +} + +func (e *encoder) encodeNullBytes(v value) { + e.writeNullBytes(v.bytes()) +} + +func (e *encoder) encodeCompactNullBytes(v value) { + e.writeCompactNullBytes(v.bytes()) +} + +func (e *encoder) encodeArray(v value, elemType reflect.Type, encodeElem encodeFunc) { + a := v.array(elemType) + n := a.length() + e.writeInt32(int32(n)) + + for i := 0; i < n; i++ { + encodeElem(e, a.index(i)) + } +} + +func (e *encoder) encodeCompactArray(v value, elemType reflect.Type, encodeElem encodeFunc) { + a := v.array(elemType) + n := a.length() + 
e.writeUnsignedVarInt(uint64(n + 1)) + + for i := 0; i < n; i++ { + encodeElem(e, a.index(i)) + } +} + +func (e *encoder) encodeNullArray(v value, elemType reflect.Type, encodeElem encodeFunc) { + a := v.array(elemType) + if a.isNil() { + e.writeInt32(-1) + return + } + + n := a.length() + e.writeInt32(int32(n)) + + for i := 0; i < n; i++ { + encodeElem(e, a.index(i)) + } +} + +func (e *encoder) encodeCompactNullArray(v value, elemType reflect.Type, encodeElem encodeFunc) { + a := v.array(elemType) + if a.isNil() { + e.writeUnsignedVarInt(0) + return + } + + n := a.length() + e.writeUnsignedVarInt(uint64(n + 1)) + for i := 0; i < n; i++ { + encodeElem(e, a.index(i)) + } +} + +func (e *encoder) writeInt8(i int8) { + writeInt8(e.buffer[:1], i) + e.Write(e.buffer[:1]) +} + +func (e *encoder) writeInt16(i int16) { + writeInt16(e.buffer[:2], i) + e.Write(e.buffer[:2]) +} + +func (e *encoder) writeInt32(i int32) { + writeInt32(e.buffer[:4], i) + e.Write(e.buffer[:4]) +} + +func (e *encoder) writeInt64(i int64) { + writeInt64(e.buffer[:8], i) + e.Write(e.buffer[:8]) +} + +func (e *encoder) writeFloat64(f float64) { + writeFloat64(e.buffer[:8], f) + e.Write(e.buffer[:8]) +} + +func (e *encoder) writeString(s string) { + e.writeInt16(int16(len(s))) + e.WriteString(s) +} + +func (e *encoder) writeVarString(s string) { + e.writeVarInt(int64(len(s))) + e.WriteString(s) +} + +func (e *encoder) writeCompactString(s string) { + e.writeUnsignedVarInt(uint64(len(s)) + 1) + e.WriteString(s) +} + +func (e *encoder) writeNullString(s string) { + if s == "" { + e.writeInt16(-1) + } else { + e.writeInt16(int16(len(s))) + e.WriteString(s) + } +} + +func (e *encoder) writeCompactNullString(s string) { + if s == "" { + e.writeUnsignedVarInt(0) + } else { + e.writeUnsignedVarInt(uint64(len(s)) + 1) + e.WriteString(s) + } +} + +func (e *encoder) writeBytes(b []byte) { + e.writeInt32(int32(len(b))) + e.Write(b) +} + +func (e *encoder) writeCompactBytes(b []byte) { + e.writeUnsignedVarInt(uint64(len(b)) + 1) + e.Write(b) +} + +func (e *encoder) writeNullBytes(b []byte) { + if b == nil { + e.writeInt32(-1) + } else { + e.writeInt32(int32(len(b))) + e.Write(b) + } +} + +func (e *encoder) writeVarNullBytes(b []byte) { + if b == nil { + e.writeVarInt(-1) + } else { + e.writeVarInt(int64(len(b))) + e.Write(b) + } +} + +func (e *encoder) writeCompactNullBytes(b []byte) { + if b == nil { + e.writeUnsignedVarInt(0) + } else { + e.writeUnsignedVarInt(uint64(len(b)) + 1) + e.Write(b) + } +} + +func (e *encoder) writeNullBytesFrom(b Bytes) error { + if b == nil { + e.writeInt32(-1) + return nil + } else { + size := int64(b.Len()) + e.writeInt32(int32(size)) + n, err := io.Copy(e, b) + if err == nil && n != size { + err = fmt.Errorf("size of nullable bytes does not match the number of bytes that were written (size=%d, written=%d): %w", size, n, io.ErrUnexpectedEOF) + } + return err + } +} + +func (e *encoder) writeVarNullBytesFrom(b Bytes) error { + if b == nil { + e.writeVarInt(-1) + return nil + } else { + size := int64(b.Len()) + e.writeVarInt(size) + n, err := io.Copy(e, b) + if err == nil && n != size { + err = fmt.Errorf("size of nullable bytes does not match the number of bytes that were written (size=%d, written=%d): %w", size, n, io.ErrUnexpectedEOF) + } + return err + } +} + +func (e *encoder) writeVarInt(i int64) { + e.writeUnsignedVarInt(uint64((i << 1) ^ (i >> 63))) +} + +func (e *encoder) writeUnsignedVarInt(i uint64) { + b := e.buffer[:] + n := 0 + + for i >= 0x80 && n < len(b) { + b[n] = byte(i) | 0x80 + i >>= 7 + 
n++ + } + + if n < len(b) { + b[n] = byte(i) + n++ + } + + e.Write(b[:n]) +} + +type encodeFunc func(*encoder, value) + +var ( + _ io.ReaderFrom = (*encoder)(nil) + _ io.Writer = (*encoder)(nil) + _ io.ByteWriter = (*encoder)(nil) + _ io.StringWriter = (*encoder)(nil) + + writerTo = reflect.TypeOf((*io.WriterTo)(nil)).Elem() +) + +func encodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) encodeFunc { + if reflect.PtrTo(typ).Implements(writerTo) { + return writerEncodeFuncOf(typ) + } + switch typ.Kind() { + case reflect.Bool: + return (*encoder).encodeBool + case reflect.Int8: + return (*encoder).encodeInt8 + case reflect.Int16: + return (*encoder).encodeInt16 + case reflect.Int32: + return (*encoder).encodeInt32 + case reflect.Int64: + return (*encoder).encodeInt64 + case reflect.Float64: + return (*encoder).encodeFloat64 + case reflect.String: + return stringEncodeFuncOf(flexible, tag) + case reflect.Struct: + return structEncodeFuncOf(typ, version, flexible) + case reflect.Slice: + if typ.Elem().Kind() == reflect.Uint8 { // []byte + return bytesEncodeFuncOf(flexible, tag) + } + return arrayEncodeFuncOf(typ, version, flexible, tag) + default: + panic("unsupported type: " + typ.String()) + } +} + +func stringEncodeFuncOf(flexible bool, tag structTag) encodeFunc { + switch { + case flexible && tag.Nullable: + // In flexible messages, all strings are compact + return (*encoder).encodeCompactNullString + case flexible: + // In flexible messages, all strings are compact + return (*encoder).encodeCompactString + case tag.Nullable: + return (*encoder).encodeNullString + default: + return (*encoder).encodeString + } +} + +func bytesEncodeFuncOf(flexible bool, tag structTag) encodeFunc { + switch { + case flexible && tag.Nullable: + // In flexible messages, all arrays are compact + return (*encoder).encodeCompactNullBytes + case flexible: + // In flexible messages, all arrays are compact + return (*encoder).encodeCompactBytes + case tag.Nullable: + return (*encoder).encodeNullBytes + default: + return (*encoder).encodeBytes + } +} + +func structEncodeFuncOf(typ reflect.Type, version int16, flexible bool) encodeFunc { + type field struct { + encode encodeFunc + index index + tagID int + } + + var fields []field + var taggedFields []field + + forEachStructField(typ, func(typ reflect.Type, index index, tag string) { + if typ.Size() != 0 { // skip struct{} + forEachStructTag(tag, func(tag structTag) bool { + if tag.MinVersion <= version && version <= tag.MaxVersion { + f := field{ + encode: encodeFuncOf(typ, version, flexible, tag), + index: index, + tagID: tag.TagID, + } + + if tag.TagID < -1 { + // Normal required field + fields = append(fields, f) + } else { + // Optional tagged field (flexible messages only) + taggedFields = append(taggedFields, f) + } + return false + } + return true + }) + } + }) + + return func(e *encoder, v value) { + for i := range fields { + f := &fields[i] + f.encode(e, v.fieldByIndex(f.index)) + } + + if flexible { + // See https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields + // for details of tag buffers in "flexible" messages. 
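+ // Each tagged field is framed as a (tag ID, size, payload) triple; the + // payload is encoded into a scratch buffer first so its size can be + // written before the payload bytes.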
+ e.writeUnsignedVarInt(uint64(len(taggedFields))) + + for i := range taggedFields { + f := &taggedFields[i] + e.writeUnsignedVarInt(uint64(f.tagID)) + + buf := &bytes.Buffer{} + se := &encoder{writer: buf} + f.encode(se, v.fieldByIndex(f.index)) + e.writeUnsignedVarInt(uint64(buf.Len())) + e.Write(buf.Bytes()) + } + } + } +} + +func arrayEncodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) encodeFunc { + elemType := typ.Elem() + elemFunc := encodeFuncOf(elemType, version, flexible, tag) + switch { + case flexible && tag.Nullable: + // In flexible messages, all arrays are compact + return func(e *encoder, v value) { e.encodeCompactNullArray(v, elemType, elemFunc) } + case flexible: + // In flexible messages, all arrays are compact + return func(e *encoder, v value) { e.encodeCompactArray(v, elemType, elemFunc) } + case tag.Nullable: + return func(e *encoder, v value) { e.encodeNullArray(v, elemType, elemFunc) } + default: + return func(e *encoder, v value) { e.encodeArray(v, elemType, elemFunc) } + } +} + +func writerEncodeFuncOf(typ reflect.Type) encodeFunc { + typ = reflect.PtrTo(typ) + return func(e *encoder, v value) { + // Optimization to write directly into the buffer when the encoder + // does not need to compute a crc32 checksum. + w := io.Writer(e) + if e.table == nil { + w = e.writer + } + _, err := v.iface(typ).(io.WriterTo).WriteTo(w) + if err != nil { + e.err = err + } + } +} + +func writeInt8(b []byte, i int8) { + b[0] = byte(i) +} + +func writeInt16(b []byte, i int16) { + binary.BigEndian.PutUint16(b, uint16(i)) +} + +func writeInt32(b []byte, i int32) { + binary.BigEndian.PutUint32(b, uint32(i)) +} + +func writeInt64(b []byte, i int64) { + binary.BigEndian.PutUint64(b, uint64(i)) +} + +func writeFloat64(b []byte, f float64) { + binary.BigEndian.PutUint64(b, math.Float64bits(f)) +} + +func Marshal(version int16, value interface{}) ([]byte, error) { + typ := typeOf(value) + cache, _ := marshalers.Load().(map[versionedType]encodeFunc) + key := versionedType{typ: typ, version: version} + encode := cache[key] + + if encode == nil { + encode = encodeFuncOf(reflect.TypeOf(value), version, false, structTag{ + MinVersion: -1, + MaxVersion: -1, + TagID: -2, + Compact: true, + Nullable: true, + }) + + newCache := make(map[versionedType]encodeFunc, len(cache)+1) + newCache[key] = encode + + for typ, fun := range cache { + newCache[typ] = fun + } + + marshalers.Store(newCache) + } + + e, _ := encoders.Get().(*encoder) + if e == nil { + e = &encoder{writer: new(bytes.Buffer)} + } + + b, _ := e.writer.(*bytes.Buffer) + defer func() { + b.Reset() + e.Reset(b) + encoders.Put(e) + }() + + encode(e, nonAddressableValueOf(value)) + + if e.err != nil { + return nil, e.err + } + + buf := b.Bytes() + out := make([]byte, len(buf)) + copy(out, buf) + return out, nil +} + +type versionedType struct { + typ _type + version int16 +} + +var ( + encoders sync.Pool // *encoder + marshalers atomic.Value // map[versionedType]encodeFunc +) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/endtxn/endtxn.go b/vendor/github.com/segmentio/kafka-go/protocol/endtxn/endtxn.go new file mode 100644 index 00000000000..c3799cb8836 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/endtxn/endtxn.go @@ -0,0 +1,35 @@ +package endtxn + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a 
"flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + TransactionalID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + ProducerID int64 `kafka:"min=v0,max=v3"` + ProducerEpoch int16 `kafka:"min=v0,max=v3"` + Committed bool `kafka:"min=v0,max=v3"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.EndTxn } + +func (r *Request) Transaction() string { return r.TransactionalID } + +var _ protocol.TransactionalMessage = (*Request)(nil) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v3"` + ErrorCode int16 `kafka:"min=v0,max=v3"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.EndTxn } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/error.go b/vendor/github.com/segmentio/kafka-go/protocol/error.go new file mode 100644 index 00000000000..52c5d083375 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/error.go @@ -0,0 +1,91 @@ +package protocol + +import ( + "fmt" +) + +// Error represents client-side protocol errors. +type Error string + +func (e Error) Error() string { return string(e) } + +func Errorf(msg string, args ...interface{}) Error { + return Error(fmt.Sprintf(msg, args...)) +} + +const ( + // ErrNoTopic is returned when a request needs to be sent to a specific. + ErrNoTopic Error = "topic not found" + + // ErrNoPartition is returned when a request needs to be sent to a specific + // partition, but the client did not find it in the cluster metadata. + ErrNoPartition Error = "topic partition not found" + + // ErrNoLeader is returned when a request needs to be sent to a partition + // leader, but the client could not determine what the leader was at this + // time. + ErrNoLeader Error = "topic partition has no leader" + + // ErrNoRecord is returned when attempting to write a message containing an + // empty record set (which kafka forbids). + // + // We handle this case client-side because kafka will close the connection + // that it received an empty produce request on, causing all concurrent + // requests to be aborted. + ErrNoRecord Error = "record set contains no records" + + // ErrNoReset is returned by ResetRecordReader when the record reader does + // not support being reset. 
+ ErrNoReset Error = "record sequence does not support reset" +) + +type TopicError struct { + Topic string + Err error +} + +func NewTopicError(topic string, err error) *TopicError { + return &TopicError{Topic: topic, Err: err} +} + +func NewErrNoTopic(topic string) *TopicError { + return NewTopicError(topic, ErrNoTopic) +} + +func (e *TopicError) Error() string { + return fmt.Sprintf("%v (topic=%q)", e.Err, e.Topic) +} + +func (e *TopicError) Unwrap() error { + return e.Err +} + +type TopicPartitionError struct { + Topic string + Partition int32 + Err error +} + +func NewTopicPartitionError(topic string, partition int32, err error) *TopicPartitionError { + return &TopicPartitionError{ + Topic: topic, + Partition: partition, + Err: err, + } +} + +func NewErrNoPartition(topic string, partition int32) *TopicPartitionError { + return NewTopicPartitionError(topic, partition, ErrNoPartition) +} + +func NewErrNoLeader(topic string, partition int32) *TopicPartitionError { + return NewTopicPartitionError(topic, partition, ErrNoLeader) +} + +func (e *TopicPartitionError) Error() string { + return fmt.Sprintf("%v (topic=%q partition=%d)", e.Err, e.Topic, e.Partition) +} + +func (e *TopicPartitionError) Unwrap() error { + return e.Err +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/fetch/fetch.go b/vendor/github.com/segmentio/kafka-go/protocol/fetch/fetch.go new file mode 100644 index 00000000000..6ce7bae1b74 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/fetch/fetch.go @@ -0,0 +1,126 @@ +package fetch + +import ( + "fmt" + + "github.com/segmentio/kafka-go/protocol" +) + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + ReplicaID int32 `kafka:"min=v0,max=v11"` + MaxWaitTime int32 `kafka:"min=v0,max=v11"` + MinBytes int32 `kafka:"min=v0,max=v11"` + MaxBytes int32 `kafka:"min=v3,max=v11"` + IsolationLevel int8 `kafka:"min=v4,max=v11"` + SessionID int32 `kafka:"min=v7,max=v11"` + SessionEpoch int32 `kafka:"min=v7,max=v11"` + Topics []RequestTopic `kafka:"min=v0,max=v11"` + ForgottenTopics []RequestForgottenTopic `kafka:"min=v7,max=v11"` + RackID string `kafka:"min=v11,max=v11"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.Fetch } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + broker := protocol.Broker{ID: -1} + + for i := range r.Topics { + t := &r.Topics[i] + + topic, ok := cluster.Topics[t.Topic] + if !ok { + return broker, NewError(protocol.NewErrNoTopic(t.Topic)) + } + + for j := range t.Partitions { + p := &t.Partitions[j] + + partition, ok := topic.Partitions[p.Partition] + if !ok { + return broker, NewError(protocol.NewErrNoPartition(t.Topic, p.Partition)) + } + + if b, ok := cluster.Brokers[partition.Leader]; !ok { + return broker, NewError(protocol.NewErrNoLeader(t.Topic, p.Partition)) + } else if broker.ID < 0 { + broker = b + } else if b.ID != broker.ID { + return broker, NewError(fmt.Errorf("mismatching leaders (%d!=%d)", b.ID, broker.ID)) + } + } + } + + return broker, nil +} + +type RequestTopic struct { + Topic string `kafka:"min=v0,max=v11"` + Partitions []RequestPartition `kafka:"min=v0,max=v11"` +} + +type RequestPartition struct { + Partition int32 `kafka:"min=v0,max=v11"` + CurrentLeaderEpoch int32 `kafka:"min=v9,max=v11"` + FetchOffset int64 `kafka:"min=v0,max=v11"` + LogStartOffset int64 `kafka:"min=v5,max=v11"` + PartitionMaxBytes int32 `kafka:"min=v0,max=v11"` +} + +type RequestForgottenTopic struct { + Topic 
string `kafka:"min=v7,max=v11"` + Partitions []int32 `kafka:"min=v7,max=v11"` +} + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v1,max=v11"` + ErrorCode int16 `kafka:"min=v7,max=v11"` + SessionID int32 `kafka:"min=v7,max=v11"` + Topics []ResponseTopic `kafka:"min=v0,max=v11"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.Fetch } + +type ResponseTopic struct { + Topic string `kafka:"min=v0,max=v11"` + Partitions []ResponsePartition `kafka:"min=v0,max=v11"` +} + +type ResponsePartition struct { + Partition int32 `kafka:"min=v0,max=v11"` + ErrorCode int16 `kafka:"min=v0,max=v11"` + HighWatermark int64 `kafka:"min=v0,max=v11"` + LastStableOffset int64 `kafka:"min=v4,max=v11"` + LogStartOffset int64 `kafka:"min=v5,max=v11"` + AbortedTransactions []ResponseTransaction `kafka:"min=v4,max=v11"` + PreferredReadReplica int32 `kafka:"min=v11,max=v11"` + RecordSet protocol.RecordSet `kafka:"min=v0,max=v11"` +} + +type ResponseTransaction struct { + ProducerID int64 `kafka:"min=v4,max=v11"` + FirstOffset int64 `kafka:"min=v4,max=v11"` +} + +var ( + _ protocol.BrokerMessage = (*Request)(nil) +) + +type Error struct { + Err error +} + +func NewError(err error) *Error { + return &Error{Err: err} +} + +func (e *Error) Error() string { + return fmt.Sprintf("fetch request error: %v", e.Err) +} + +func (e *Error) Unwrap() error { + return e.Err +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/findcoordinator/findcoordinator.go b/vendor/github.com/segmentio/kafka-go/protocol/findcoordinator/findcoordinator.go new file mode 100644 index 00000000000..0306e206d2d --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/findcoordinator/findcoordinator.go @@ -0,0 +1,25 @@ +package findcoordinator + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + Key string `kafka:"min=v0,max=v2"` + KeyType int8 `kafka:"min=v1,max=v2"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.FindCoordinator } + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v1,max=v2"` + ErrorCode int16 `kafka:"min=v0,max=v2"` + ErrorMessage string `kafka:"min=v1,max=v2,nullable"` + NodeID int32 `kafka:"min=v0,max=v2"` + Host string `kafka:"min=v0,max=v2"` + Port int32 `kafka:"min=v0,max=v2"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.FindCoordinator } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/heartbeat/heartbeat.go b/vendor/github.com/segmentio/kafka-go/protocol/heartbeat/heartbeat.go new file mode 100644 index 00000000000..cf4c1118537 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/heartbeat/heartbeat.go @@ -0,0 +1,36 @@ +package heartbeat + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_Heartbeat +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
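+	//
+	// Editorial note (illustrative commentary, not upstream kafka-go text):
+	// "flexible" versions (KIP-482) switch strings and arrays to compact
+	// encodings and allow optional tagged fields. The `kafka` struct tags
+	// below declare, per field, the version range it is serialized in, e.g.:
+	//
+	//	GroupID string   `kafka:"min=v0,max=v4"`      // present in v0..v4
+	//	_       struct{} `kafka:"min=v4,max=v4,tag"`  // marks v4 as flexible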
+ _ struct{} `kafka:"min=v4,max=v4,tag"` + + GroupID string `kafka:"min=v0,max=v4"` + GenerationID int32 `kafka:"min=v0,max=v4"` + MemberID string `kafka:"min=v0,max=v4"` + GroupInstanceID string `kafka:"min=v3,max=v4,nullable"` +} + +func (r *Request) ApiKey() protocol.ApiKey { + return protocol.Heartbeat +} + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v4,max=v4,tag"` + + ErrorCode int16 `kafka:"min=v0,max=v4"` + ThrottleTimeMs int32 `kafka:"min=v1,max=v4"` +} + +func (r *Response) ApiKey() protocol.ApiKey { + return protocol.Heartbeat +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/incrementalalterconfigs/incrementalalterconfigs.go b/vendor/github.com/segmentio/kafka-go/protocol/incrementalalterconfigs/incrementalalterconfigs.go new file mode 100644 index 00000000000..f4328efc1dd --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/incrementalalterconfigs/incrementalalterconfigs.go @@ -0,0 +1,79 @@ +package incrementalalterconfigs + +import ( + "errors" + "strconv" + + "github.com/segmentio/kafka-go/protocol" +) + +const ( + resourceTypeBroker int8 = 4 +) + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_IncrementalAlterConfigs +type Request struct { + Resources []RequestResource `kafka:"min=v0,max=v0"` + ValidateOnly bool `kafka:"min=v0,max=v0"` +} + +type RequestResource struct { + ResourceType int8 `kafka:"min=v0,max=v0"` + ResourceName string `kafka:"min=v0,max=v0"` + Configs []RequestConfig `kafka:"min=v0,max=v0"` +} + +type RequestConfig struct { + Name string `kafka:"min=v0,max=v0"` + ConfigOperation int8 `kafka:"min=v0,max=v0"` + Value string `kafka:"min=v0,max=v0,nullable"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.IncrementalAlterConfigs } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + // Check that at most only one broker is being updated. + // + // TODO: Support updating multiple brokers in a single request. 
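+	//
+	// Editorial sketch (inferred from the code below, not upstream text): a
+	// request altering a broker resource routes to that broker, anything else
+	// routes to the cluster controller. For example, assuming broker 1 exists
+	// in the cluster metadata:
+	//
+	//	r := &Request{Resources: []RequestResource{{
+	//		ResourceType: resourceTypeBroker,
+	//		ResourceName: "1", // broker IDs travel as strings here
+	//	}}}
+	//	b, _ := r.Broker(cluster) // b == cluster.Brokers[1]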
+ brokers := map[string]struct{}{} + for _, resource := range r.Resources { + if resource.ResourceType == resourceTypeBroker { + brokers[resource.ResourceName] = struct{}{} + } + } + if len(brokers) > 1 { + return protocol.Broker{}, + errors.New("Updating more than one broker in a single request is not supported yet") + } + + for _, resource := range r.Resources { + if resource.ResourceType == resourceTypeBroker { + brokerID, err := strconv.Atoi(resource.ResourceName) + if err != nil { + return protocol.Broker{}, err + } + + return cluster.Brokers[int32(brokerID)], nil + } + } + + return cluster.Brokers[cluster.Controller], nil +} + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v0,max=v0"` + Responses []ResponseAlterResponse `kafka:"min=v0,max=v0"` +} + +type ResponseAlterResponse struct { + ErrorCode int16 `kafka:"min=v0,max=v0"` + ErrorMessage string `kafka:"min=v0,max=v0,nullable"` + ResourceType int8 `kafka:"min=v0,max=v0"` + ResourceName string `kafka:"min=v0,max=v0"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.IncrementalAlterConfigs } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/initproducerid/initproducerid.go b/vendor/github.com/segmentio/kafka-go/protocol/initproducerid/initproducerid.go new file mode 100644 index 00000000000..17f6ef7ea35 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/initproducerid/initproducerid.go @@ -0,0 +1,37 @@ +package initproducerid + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v2,max=v4,tag"` + + TransactionalID string `kafka:"min=v0,max=v4,nullable"` + TransactionTimeoutMs int32 `kafka:"min=v0,max=v4"` + ProducerID int64 `kafka:"min=v3,max=v4"` + ProducerEpoch int16 `kafka:"min=v3,max=v4"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.InitProducerId } + +func (r *Request) Transaction() string { return r.TransactionalID } + +var _ protocol.TransactionalMessage = (*Request)(nil) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v2,max=v4,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v4"` + ErrorCode int16 `kafka:"min=v0,max=v4"` + ProducerID int64 `kafka:"min=v0,max=v4"` + ProducerEpoch int16 `kafka:"min=v0,max=v4"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.InitProducerId } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/joingroup/joingroup.go b/vendor/github.com/segmentio/kafka-go/protocol/joingroup/joingroup.go new file mode 100644 index 00000000000..a0738eed09b --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/joingroup/joingroup.go @@ -0,0 +1,67 @@ +package joingroup + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
+ _ struct{} `kafka:"min=v6,max=v7,tag"` + + GroupID string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` + SessionTimeoutMS int32 `kafka:"min=v0,max=v7"` + RebalanceTimeoutMS int32 `kafka:"min=v1,max=v7"` + MemberID string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` + GroupInstanceID string `kafka:"min=v5,max=v5,nullable|min=v6,max=v7,compact,nullable"` + ProtocolType string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` + Protocols []RequestProtocol `kafka:"min=v0,max=v7"` +} + +type RequestProtocol struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v6,max=v7,tag"` + + Name string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` + Metadata []byte `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` +} + +func (r *Request) ApiKey() protocol.ApiKey { + return protocol.JoinGroup +} + +func (r *Request) Group() string { return r.GroupID } + +var _ protocol.GroupMessage = (*Request)(nil) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v6,max=v7,tag"` + + ThrottleTimeMS int32 `kafka:"min=v2,max=v7"` + ErrorCode int16 `kafka:"min=v0,max=v7"` + GenerationID int32 `kafka:"min=v0,max=v7"` + ProtocolType string `kafka:"min=v7,max=v7,compact,nullable"` + ProtocolName string `kafka:"min=v0,max=v5|min=v6,max=v6,compact|min=v7,max=v7,compact,nullable"` + LeaderID string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` + MemberID string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` + Members []ResponseMember `kafka:"min=v0,max=v7"` +} + +type ResponseMember struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v6,max=v7,tag"` + + MemberID string `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` + GroupInstanceID string `kafka:"min=v5,max=v5,nullable|min=v6,max=v7,nullable,compact"` + Metadata []byte `kafka:"min=v0,max=v5|min=v6,max=v7,compact"` +} + +type ResponseMemberMetadata struct{} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.JoinGroup } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/leavegroup/leavegroup.go b/vendor/github.com/segmentio/kafka-go/protocol/leavegroup/leavegroup.go new file mode 100644 index 00000000000..4dd8773e435 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/leavegroup/leavegroup.go @@ -0,0 +1,65 @@ +package leavegroup + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v4,max=v4,tag"` + + GroupID string `kafka:"min=v0,max=v2|min=v3,max=v4,compact"` + MemberID string `kafka:"min=v0,max=v2"` + Members []RequestMember `kafka:"min=v3,max=v4"` +} + +func (r *Request) Prepare(apiVersion int16) { + if apiVersion < 3 { + if len(r.Members) > 0 { + r.MemberID = r.Members[0].MemberID + } + } +} + +type RequestMember struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
+ _ struct{} `kafka:"min=v4,max=v4,tag"` + + MemberID string `kafka:"min=v3,max=v3|min=v4,max=v4,compact"` + GroupInstanceID string `kafka:"min=v3,max=v3,nullable|min=v4,max=v4,nullable,compact"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.LeaveGroup } + +func (r *Request) Group() string { return r.GroupID } + +var ( + _ protocol.GroupMessage = (*Request)(nil) + _ protocol.PreparedMessage = (*Request)(nil) +) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v4,max=v4,tag"` + + ErrorCode int16 `kafka:"min=v0,max=v4"` + ThrottleTimeMS int32 `kafka:"min=v1,max=v4"` + Members []ResponseMember `kafka:"min=v3,max=v4"` +} + +type ResponseMember struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v4,max=v4,tag"` + + MemberID string `kafka:"min=v3,max=v3|min=v4,max=v4,compact"` + GroupInstanceID string `kafka:"min=v3,max=v3,nullable|min=v4,max=v4,nullable,compact"` + ErrorCode int16 `kafka:"min=v3,max=v4"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.LeaveGroup } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/listgroups/listgroups.go b/vendor/github.com/segmentio/kafka-go/protocol/listgroups/listgroups.go new file mode 100644 index 00000000000..136458a25cb --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/listgroups/listgroups.go @@ -0,0 +1,82 @@ +package listgroups + +import ( + "github.com/segmentio/kafka-go/protocol" +) + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +// Detailed API definition: https://kafka.apache.org/protocol#The_Messages_ListGroups +type Request struct { + _ struct{} `kafka:"min=v0,max=v2"` + brokerID int32 +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.ListGroups } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + return cluster.Brokers[r.brokerID], nil +} + +func (r *Request) Split(cluster protocol.Cluster) ( + []protocol.Message, + protocol.Merger, + error, +) { + messages := []protocol.Message{} + + for _, broker := range cluster.Brokers { + messages = append(messages, &Request{brokerID: broker.ID}) + } + + return messages, new(Response), nil +} + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v1,max=v2"` + ErrorCode int16 `kafka:"min=v0,max=v2"` + Groups []ResponseGroup `kafka:"min=v0,max=v2"` +} + +type ResponseGroup struct { + GroupID string `kafka:"min=v0,max=v2"` + ProtocolType string `kafka:"min=v0,max=v2"` + + // Use this to store which broker returned the response + BrokerID int32 `kafka:"-"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.ListGroups } + +func (r *Response) Merge(requests []protocol.Message, results []interface{}) ( + protocol.Message, + error, +) { + response := &Response{} + + for r, result := range results { + m, err := protocol.Result(result) + if err != nil { + return nil, err + } + brokerResp := m.(*Response) + respGroups := []ResponseGroup{} + + for _, brokerResp := range brokerResp.Groups { + respGroups = append( + respGroups, + ResponseGroup{ + GroupID: brokerResp.GroupID, + ProtocolType: brokerResp.ProtocolType, + BrokerID: requests[r].(*Request).brokerID, + }, + ) + } + + response.Groups = append(response.Groups, respGroups...) 
+	}
+
+	return response, nil
+}
diff --git a/vendor/github.com/segmentio/kafka-go/protocol/listoffsets/listoffsets.go b/vendor/github.com/segmentio/kafka-go/protocol/listoffsets/listoffsets.go
new file mode 100644
index 00000000000..059662d9ae1
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/protocol/listoffsets/listoffsets.go
@@ -0,0 +1,230 @@
+package listoffsets
+
+import (
+	"sort"
+
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+func init() {
+	protocol.Register(&Request{}, &Response{})
+}
+
+type Request struct {
+	ReplicaID      int32          `kafka:"min=v1,max=v5"`
+	IsolationLevel int8           `kafka:"min=v2,max=v5"`
+	Topics         []RequestTopic `kafka:"min=v1,max=v5"`
+}
+
+type RequestTopic struct {
+	Topic      string             `kafka:"min=v1,max=v5"`
+	Partitions []RequestPartition `kafka:"min=v1,max=v5"`
+}
+
+type RequestPartition struct {
+	Partition          int32 `kafka:"min=v1,max=v5"`
+	CurrentLeaderEpoch int32 `kafka:"min=v4,max=v5"`
+	Timestamp          int64 `kafka:"min=v1,max=v5"`
+	// v0 of the API predates kafka 0.10, and doesn't make much sense to
+	// use, so we chose not to support it. It had this extra field to limit
+	// the number of offsets returned, which has been removed in v1.
+	//
+	// MaxNumOffsets int32 `kafka:"min=v0,max=v0"`
+}
+
+func (r *Request) ApiKey() protocol.ApiKey { return protocol.ListOffsets }
+
+func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) {
+	// Expects r to be a single-partition request that was returned by Split;
+	// it will likely panic or produce the wrong result if that's not the case.
+	partition := r.Topics[0].Partitions[0].Partition
+	topic := r.Topics[0].Topic
+
+	for _, p := range cluster.Topics[topic].Partitions {
+		if p.ID == partition {
+			return cluster.Brokers[p.Leader], nil
+		}
+	}
+
+	return protocol.Broker{ID: -1}, nil
+}
+
+func (r *Request) Split(cluster protocol.Cluster) ([]protocol.Message, protocol.Merger, error) {
+	// Because kafka refuses to answer ListOffsets requests containing multiple
+	// entries for the same topic/partition pair, we submit multiple requests
+	// on the wire and merge their results back.
+	//
+	// ListOffsets requests also need to be sent to partition leaders; to keep
+	// the logic simple we split each offset request into a single message.
+	// This may cause more requests to be sent on the wire, but it keeps the
+	// code sane, and we can still optimize the aggregation mechanism later if
+	// it becomes a problem.
+	//
+	// Really the idea here is to shield applications from having to deal with
+	// the limitation of the kafka server, so they can request any combination
+	// of topic/partition/offsets.
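+	//
+	// Editorial sketch (illustrative, hypothetical topic name): a request for
+	// two partitions of one topic splits into two single-partition messages,
+	// and the returned protocol.Merger (a *Response) reassembles the results:
+	//
+	//	r := &Request{Topics: []RequestTopic{{
+	//		Topic:      "events",
+	//		Partitions: []RequestPartition{{Partition: 0}, {Partition: 1}},
+	//	}}}
+	//	msgs, merger, _ := r.Split(cluster) // len(msgs) == 2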
+	requests := make([]Request, 0, 2*len(r.Topics))
+
+	for _, t := range r.Topics {
+		for _, p := range t.Partitions {
+			requests = append(requests, Request{
+				ReplicaID:      r.ReplicaID,
+				IsolationLevel: r.IsolationLevel,
+				Topics: []RequestTopic{{
+					Topic: t.Topic,
+					Partitions: []RequestPartition{{
+						Partition:          p.Partition,
+						CurrentLeaderEpoch: p.CurrentLeaderEpoch,
+						Timestamp:          p.Timestamp,
+					}},
+				}},
+			})
+		}
+	}
+
+	messages := make([]protocol.Message, len(requests))
+
+	for i := range requests {
+		messages[i] = &requests[i]
+	}
+
+	return messages, new(Response), nil
+}
+
+type Response struct {
+	ThrottleTimeMs int32           `kafka:"min=v2,max=v5"`
+	Topics         []ResponseTopic `kafka:"min=v1,max=v5"`
+}
+
+type ResponseTopic struct {
+	Topic      string              `kafka:"min=v1,max=v5"`
+	Partitions []ResponsePartition `kafka:"min=v1,max=v5"`
+}
+
+type ResponsePartition struct {
+	Partition   int32 `kafka:"min=v1,max=v5"`
+	ErrorCode   int16 `kafka:"min=v1,max=v5"`
+	Timestamp   int64 `kafka:"min=v1,max=v5"`
+	Offset      int64 `kafka:"min=v1,max=v5"`
+	LeaderEpoch int32 `kafka:"min=v4,max=v5"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.ListOffsets }
+
+func (r *Response) Merge(requests []protocol.Message, results []interface{}) (protocol.Message, error) {
+	type topicPartition struct {
+		topic     string
+		partition int32
+	}
+
+	// Kafka doesn't always return the timestamp in the response; for example,
+	// when the request sends -2 (for the first offset) it always returns -1,
+	// probably to indicate that the timestamp is unknown. This means that we
+	// can't correlate the requests and responses based on their timestamps:
+	// the primary key is the topic/partition pair.
+	//
+	// To make the API a bit friendlier, we reconstruct an index of topic
+	// partitions to the timestamps that were requested, and override the
+	// timestamp value in the response.
+	timestamps := make([]map[topicPartition]int64, len(requests))
+
+	for i, m := range requests {
+		req := m.(*Request)
+		ts := make(map[topicPartition]int64, len(req.Topics))
+
+		for _, t := range req.Topics {
+			for _, p := range t.Partitions {
+				ts[topicPartition{
+					topic:     t.Topic,
+					partition: p.Partition,
+				}] = p.Timestamp
+			}
+		}
+
+		timestamps[i] = ts
+	}
+
+	topics := make(map[string][]ResponsePartition)
+	errors := 0
+
+	for i, res := range results {
+		m, err := protocol.Result(res)
+		if err != nil {
+			for _, t := range requests[i].(*Request).Topics {
+				partitions := topics[t.Topic]
+
+				for _, p := range t.Partitions {
+					partitions = append(partitions, ResponsePartition{
+						Partition: p.Partition,
+						ErrorCode: -1, // UNKNOWN, can we do better?
+ Timestamp: -1, + Offset: -1, + LeaderEpoch: -1, + }) + } + + topics[t.Topic] = partitions + } + errors++ + continue + } + + response := m.(*Response) + + if r.ThrottleTimeMs < response.ThrottleTimeMs { + r.ThrottleTimeMs = response.ThrottleTimeMs + } + + for _, t := range response.Topics { + for _, p := range t.Partitions { + if timestamp, ok := timestamps[i][topicPartition{ + topic: t.Topic, + partition: p.Partition, + }]; ok { + p.Timestamp = timestamp + } + topics[t.Topic] = append(topics[t.Topic], p) + } + } + + } + + if errors > 0 && errors == len(results) { + _, err := protocol.Result(results[0]) + return nil, err + } + + r.Topics = make([]ResponseTopic, 0, len(topics)) + + for topicName, partitions := range topics { + r.Topics = append(r.Topics, ResponseTopic{ + Topic: topicName, + Partitions: partitions, + }) + } + + sort.Slice(r.Topics, func(i, j int) bool { + return r.Topics[i].Topic < r.Topics[j].Topic + }) + + for _, t := range r.Topics { + sort.Slice(t.Partitions, func(i, j int) bool { + p1 := &t.Partitions[i] + p2 := &t.Partitions[j] + + if p1.Partition != p2.Partition { + return p1.Partition < p2.Partition + } + + return p1.Offset < p2.Offset + }) + } + + return r, nil +} + +var ( + _ protocol.BrokerMessage = (*Request)(nil) + _ protocol.Splitter = (*Request)(nil) + _ protocol.Merger = (*Response)(nil) +) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/metadata/metadata.go b/vendor/github.com/segmentio/kafka-go/protocol/metadata/metadata.go new file mode 100644 index 00000000000..ac2031bda33 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/metadata/metadata.go @@ -0,0 +1,52 @@ +package metadata + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + TopicNames []string `kafka:"min=v0,max=v8,nullable"` + AllowAutoTopicCreation bool `kafka:"min=v4,max=v8"` + IncludeClusterAuthorizedOperations bool `kafka:"min=v8,max=v8"` + IncludeTopicAuthorizedOperations bool `kafka:"min=v8,max=v8"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.Metadata } + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v3,max=v8"` + Brokers []ResponseBroker `kafka:"min=v0,max=v8"` + ClusterID string `kafka:"min=v2,max=v8,nullable"` + ControllerID int32 `kafka:"min=v1,max=v8"` + Topics []ResponseTopic `kafka:"min=v0,max=v8"` + ClusterAuthorizedOperations int32 `kafka:"min=v8,max=v8"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.Metadata } + +type ResponseBroker struct { + NodeID int32 `kafka:"min=v0,max=v8"` + Host string `kafka:"min=v0,max=v8"` + Port int32 `kafka:"min=v0,max=v8"` + Rack string `kafka:"min=v1,max=v8,nullable"` +} + +type ResponseTopic struct { + ErrorCode int16 `kafka:"min=v0,max=v8"` + Name string `kafka:"min=v0,max=v8"` + IsInternal bool `kafka:"min=v1,max=v8"` + Partitions []ResponsePartition `kafka:"min=v0,max=v8"` + TopicAuthorizedOperations int32 `kafka:"min=v8,max=v8"` +} + +type ResponsePartition struct { + ErrorCode int16 `kafka:"min=v0,max=v8"` + PartitionIndex int32 `kafka:"min=v0,max=v8"` + LeaderID int32 `kafka:"min=v0,max=v8"` + LeaderEpoch int32 `kafka:"min=v7,max=v8"` + ReplicaNodes []int32 `kafka:"min=v0,max=v8"` + IsrNodes []int32 `kafka:"min=v0,max=v8"` + OfflineReplicas []int32 `kafka:"min=v5,max=v8"` +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/offsetcommit/offsetcommit.go 
b/vendor/github.com/segmentio/kafka-go/protocol/offsetcommit/offsetcommit.go new file mode 100644 index 00000000000..5844928a64e --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/offsetcommit/offsetcommit.go @@ -0,0 +1,54 @@ +package offsetcommit + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + GroupID string `kafka:"min=v0,max=v7"` + GenerationID int32 `kafka:"min=v1,max=v7"` + MemberID string `kafka:"min=v1,max=v7"` + RetentionTimeMs int64 `kafka:"min=v2,max=v4"` + GroupInstanceID string `kafka:"min=v7,max=v7,nullable"` + Topics []RequestTopic `kafka:"min=v0,max=v7"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.OffsetCommit } + +func (r *Request) Group() string { return r.GroupID } + +type RequestTopic struct { + Name string `kafka:"min=v0,max=v7"` + Partitions []RequestPartition `kafka:"min=v0,max=v7"` +} + +type RequestPartition struct { + PartitionIndex int32 `kafka:"min=v0,max=v7"` + CommittedOffset int64 `kafka:"min=v0,max=v7"` + CommitTimestamp int64 `kafka:"min=v1,max=v1"` + CommittedLeaderEpoch int32 `kafka:"min=v5,max=v7"` + CommittedMetadata string `kafka:"min=v0,max=v7,nullable"` +} + +var ( + _ protocol.GroupMessage = (*Request)(nil) +) + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v3,max=v7"` + Topics []ResponseTopic `kafka:"min=v0,max=v7"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.OffsetCommit } + +type ResponseTopic struct { + Name string `kafka:"min=v0,max=v7"` + Partitions []ResponsePartition `kafka:"min=v0,max=v7"` +} + +type ResponsePartition struct { + PartitionIndex int32 `kafka:"min=v0,max=v7"` + ErrorCode int16 `kafka:"min=v0,max=v7"` +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/offsetdelete/offsetdelete.go b/vendor/github.com/segmentio/kafka-go/protocol/offsetdelete/offsetdelete.go new file mode 100644 index 00000000000..bda619f3ce6 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/offsetdelete/offsetdelete.go @@ -0,0 +1,47 @@ +package offsetdelete + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + GroupID string `kafka:"min=v0,max=v0"` + Topics []RequestTopic `kafka:"min=v0,max=v0"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.OffsetDelete } + +func (r *Request) Group() string { return r.GroupID } + +type RequestTopic struct { + Name string `kafka:"min=v0,max=v0"` + Partitions []RequestPartition `kafka:"min=v0,max=v0"` +} + +type RequestPartition struct { + PartitionIndex int32 `kafka:"min=v0,max=v0"` +} + +var ( + _ protocol.GroupMessage = (*Request)(nil) +) + +type Response struct { + ErrorCode int16 `kafka:"min=v0,max=v0"` + ThrottleTimeMs int32 `kafka:"min=v0,max=v0"` + Topics []ResponseTopic `kafka:"min=v0,max=v0"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.OffsetDelete } + +type ResponseTopic struct { + Name string `kafka:"min=v0,max=v0"` + Partitions []ResponsePartition `kafka:"min=v0,max=v0"` +} + +type ResponsePartition struct { + PartitionIndex int32 `kafka:"min=v0,max=v0"` + ErrorCode int16 `kafka:"min=v0,max=v0"` +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/offsetfetch/offsetfetch.go b/vendor/github.com/segmentio/kafka-go/protocol/offsetfetch/offsetfetch.go new file mode 100644 index 00000000000..011003340c7 --- 
/dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/offsetfetch/offsetfetch.go @@ -0,0 +1,46 @@ +package offsetfetch + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + GroupID string `kafka:"min=v0,max=v5"` + Topics []RequestTopic `kafka:"min=v0,max=v5"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.OffsetFetch } + +func (r *Request) Group() string { return r.GroupID } + +type RequestTopic struct { + Name string `kafka:"min=v0,max=v5"` + PartitionIndexes []int32 `kafka:"min=v0,max=v5"` +} + +var ( + _ protocol.GroupMessage = (*Request)(nil) +) + +type Response struct { + ThrottleTimeMs int32 `kafka:"min=v3,max=v5"` + Topics []ResponseTopic `kafka:"min=v0,max=v5"` + ErrorCode int16 `kafka:"min=v2,max=v5"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.OffsetFetch } + +type ResponseTopic struct { + Name string `kafka:"min=v0,max=v5"` + Partitions []ResponsePartition `kafka:"min=v0,max=v5"` +} + +type ResponsePartition struct { + PartitionIndex int32 `kafka:"min=v0,max=v5"` + CommittedOffset int64 `kafka:"min=v0,max=v5"` + ComittedLeaderEpoch int32 `kafka:"min=v5,max=v5"` + Metadata string `kafka:"min=v0,max=v5,nullable"` + ErrorCode int16 `kafka:"min=v0,max=v5"` +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/produce/produce.go b/vendor/github.com/segmentio/kafka-go/protocol/produce/produce.go new file mode 100644 index 00000000000..6d337c3cf63 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/produce/produce.go @@ -0,0 +1,147 @@ +package produce + +import ( + "fmt" + + "github.com/segmentio/kafka-go/protocol" +) + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + TransactionalID string `kafka:"min=v3,max=v8,nullable"` + Acks int16 `kafka:"min=v0,max=v8"` + Timeout int32 `kafka:"min=v0,max=v8"` + Topics []RequestTopic `kafka:"min=v0,max=v8"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.Produce } + +func (r *Request) Broker(cluster protocol.Cluster) (protocol.Broker, error) { + broker := protocol.Broker{ID: -1} + + for i := range r.Topics { + t := &r.Topics[i] + + topic, ok := cluster.Topics[t.Topic] + if !ok { + return broker, NewError(protocol.NewErrNoTopic(t.Topic)) + } + + for j := range t.Partitions { + p := &t.Partitions[j] + + partition, ok := topic.Partitions[p.Partition] + if !ok { + return broker, NewError(protocol.NewErrNoPartition(t.Topic, p.Partition)) + } + + if b, ok := cluster.Brokers[partition.Leader]; !ok { + return broker, NewError(protocol.NewErrNoLeader(t.Topic, p.Partition)) + } else if broker.ID < 0 { + broker = b + } else if b.ID != broker.ID { + return broker, NewError(fmt.Errorf("mismatching leaders (%d!=%d)", b.ID, broker.ID)) + } + } + } + + return broker, nil +} + +func (r *Request) Prepare(apiVersion int16) { + // Determine which version of the message should be used, based on which + // version of the Produce API is supported by the server. + // + // In version 0.11, kafka gives this error: + // + // org.apache.kafka.common.record.InvalidRecordException + // Produce requests with version 3 are only allowed to contain record batches with magic version. + // + // In version 2.x, kafka refuses the message claiming that the CRC32 + // checksum is invalid. 
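+	//
+	// Editorial summary of the mapping applied below (not upstream text):
+	//
+	//	Produce v0..v2 -> record set version 1 (legacy message sets)
+	//	Produce v3..v8 -> record set version 2 (record batches)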
+	var recordVersion int8
+
+	if apiVersion < 3 {
+		recordVersion = 1
+	} else {
+		recordVersion = 2
+	}
+
+	for i := range r.Topics {
+		t := &r.Topics[i]
+
+		for j := range t.Partitions {
+			p := &t.Partitions[j]
+
+			// Allow the program to override the version if really needed.
+			if p.RecordSet.Version == 0 {
+				p.RecordSet.Version = recordVersion
+			}
+		}
+	}
+}
+
+func (r *Request) HasResponse() bool {
+	return r.Acks != 0
+}
+
+type RequestTopic struct {
+	Topic      string             `kafka:"min=v0,max=v8"`
+	Partitions []RequestPartition `kafka:"min=v0,max=v8"`
+}
+
+type RequestPartition struct {
+	Partition int32              `kafka:"min=v0,max=v8"`
+	RecordSet protocol.RecordSet `kafka:"min=v0,max=v8"`
+}
+
+type Response struct {
+	Topics         []ResponseTopic `kafka:"min=v0,max=v8"`
+	ThrottleTimeMs int32           `kafka:"min=v1,max=v8"`
+}
+
+func (r *Response) ApiKey() protocol.ApiKey { return protocol.Produce }
+
+type ResponseTopic struct {
+	Topic      string              `kafka:"min=v0,max=v8"`
+	Partitions []ResponsePartition `kafka:"min=v0,max=v8"`
+}
+
+type ResponsePartition struct {
+	Partition      int32           `kafka:"min=v0,max=v8"`
+	ErrorCode      int16           `kafka:"min=v0,max=v8"`
+	BaseOffset     int64           `kafka:"min=v0,max=v8"`
+	LogAppendTime  int64           `kafka:"min=v2,max=v8"`
+	LogStartOffset int64           `kafka:"min=v5,max=v8"`
+	RecordErrors   []ResponseError `kafka:"min=v8,max=v8"`
+	ErrorMessage   string          `kafka:"min=v8,max=v8,nullable"`
+}
+
+type ResponseError struct {
+	BatchIndex             int32  `kafka:"min=v8,max=v8"`
+	BatchIndexErrorMessage string `kafka:"min=v8,max=v8,nullable"`
+}
+
+var (
+	_ protocol.BrokerMessage   = (*Request)(nil)
+	_ protocol.PreparedMessage = (*Request)(nil)
+)
+
+type Error struct {
+	Err error
+}
+
+func NewError(err error) *Error {
+	return &Error{Err: err}
+}
+
+func (e *Error) Error() string {
+	return fmt.Sprintf("produce request error: %v", e.Err)
+}
+
+func (e *Error) Unwrap() error {
+	return e.Err
+}
diff --git a/vendor/github.com/segmentio/kafka-go/protocol/protocol.go b/vendor/github.com/segmentio/kafka-go/protocol/protocol.go
new file mode 100644
index 00000000000..f5f5361488a
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/protocol/protocol.go
@@ -0,0 +1,506 @@
+package protocol
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// Message is an interface implemented by all request and response types of the
+// kafka protocol.
+//
+// This interface is used mostly as a safe-guard to provide a compile-time check
+// for values passed to functions dealing with kafka message types.
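+//
+// Editorial sketch (hypothetical type, not part of upstream kafka-go): a
+// minimal Message only needs to report its API key; richer behaviors come
+// from the optional extension interfaces declared later in this file:
+//
+//	type apiVersionsRequest struct{}
+//
+//	func (*apiVersionsRequest) ApiKey() ApiKey { return ApiVersions }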
+type Message interface { + ApiKey() ApiKey +} + +type ApiKey int16 + +func (k ApiKey) String() string { + if i := int(k); i >= 0 && i < len(apiNames) { + return apiNames[i] + } + return strconv.Itoa(int(k)) +} + +func (k ApiKey) MinVersion() int16 { return k.apiType().minVersion() } + +func (k ApiKey) MaxVersion() int16 { return k.apiType().maxVersion() } + +func (k ApiKey) SelectVersion(minVersion, maxVersion int16) int16 { + min := k.MinVersion() + max := k.MaxVersion() + switch { + case min > maxVersion: + return min + case max < maxVersion: + return max + default: + return maxVersion + } +} + +func (k ApiKey) apiType() apiType { + if i := int(k); i >= 0 && i < len(apiTypes) { + return apiTypes[i] + } + return apiType{} +} + +const ( + Produce ApiKey = 0 + Fetch ApiKey = 1 + ListOffsets ApiKey = 2 + Metadata ApiKey = 3 + LeaderAndIsr ApiKey = 4 + StopReplica ApiKey = 5 + UpdateMetadata ApiKey = 6 + ControlledShutdown ApiKey = 7 + OffsetCommit ApiKey = 8 + OffsetFetch ApiKey = 9 + FindCoordinator ApiKey = 10 + JoinGroup ApiKey = 11 + Heartbeat ApiKey = 12 + LeaveGroup ApiKey = 13 + SyncGroup ApiKey = 14 + DescribeGroups ApiKey = 15 + ListGroups ApiKey = 16 + SaslHandshake ApiKey = 17 + ApiVersions ApiKey = 18 + CreateTopics ApiKey = 19 + DeleteTopics ApiKey = 20 + DeleteRecords ApiKey = 21 + InitProducerId ApiKey = 22 + OffsetForLeaderEpoch ApiKey = 23 + AddPartitionsToTxn ApiKey = 24 + AddOffsetsToTxn ApiKey = 25 + EndTxn ApiKey = 26 + WriteTxnMarkers ApiKey = 27 + TxnOffsetCommit ApiKey = 28 + DescribeAcls ApiKey = 29 + CreateAcls ApiKey = 30 + DeleteAcls ApiKey = 31 + DescribeConfigs ApiKey = 32 + AlterConfigs ApiKey = 33 + AlterReplicaLogDirs ApiKey = 34 + DescribeLogDirs ApiKey = 35 + SaslAuthenticate ApiKey = 36 + CreatePartitions ApiKey = 37 + CreateDelegationToken ApiKey = 38 + RenewDelegationToken ApiKey = 39 + ExpireDelegationToken ApiKey = 40 + DescribeDelegationToken ApiKey = 41 + DeleteGroups ApiKey = 42 + ElectLeaders ApiKey = 43 + IncrementalAlterConfigs ApiKey = 44 + AlterPartitionReassignments ApiKey = 45 + ListPartitionReassignments ApiKey = 46 + OffsetDelete ApiKey = 47 + DescribeClientQuotas ApiKey = 48 + AlterClientQuotas ApiKey = 49 + + numApis = 50 +) + +var apiNames = [numApis]string{ + Produce: "Produce", + Fetch: "Fetch", + ListOffsets: "ListOffsets", + Metadata: "Metadata", + LeaderAndIsr: "LeaderAndIsr", + StopReplica: "StopReplica", + UpdateMetadata: "UpdateMetadata", + ControlledShutdown: "ControlledShutdown", + OffsetCommit: "OffsetCommit", + OffsetFetch: "OffsetFetch", + FindCoordinator: "FindCoordinator", + JoinGroup: "JoinGroup", + Heartbeat: "Heartbeat", + LeaveGroup: "LeaveGroup", + SyncGroup: "SyncGroup", + DescribeGroups: "DescribeGroups", + ListGroups: "ListGroups", + SaslHandshake: "SaslHandshake", + ApiVersions: "ApiVersions", + CreateTopics: "CreateTopics", + DeleteTopics: "DeleteTopics", + DeleteRecords: "DeleteRecords", + InitProducerId: "InitProducerId", + OffsetForLeaderEpoch: "OffsetForLeaderEpoch", + AddPartitionsToTxn: "AddPartitionsToTxn", + AddOffsetsToTxn: "AddOffsetsToTxn", + EndTxn: "EndTxn", + WriteTxnMarkers: "WriteTxnMarkers", + TxnOffsetCommit: "TxnOffsetCommit", + DescribeAcls: "DescribeAcls", + CreateAcls: "CreateAcls", + DeleteAcls: "DeleteAcls", + DescribeConfigs: "DescribeConfigs", + AlterConfigs: "AlterConfigs", + AlterReplicaLogDirs: "AlterReplicaLogDirs", + DescribeLogDirs: "DescribeLogDirs", + SaslAuthenticate: "SaslAuthenticate", + CreatePartitions: "CreatePartitions", + CreateDelegationToken: "CreateDelegationToken", + 
RenewDelegationToken:        "RenewDelegationToken",
+	ExpireDelegationToken:       "ExpireDelegationToken",
+	DescribeDelegationToken:     "DescribeDelegationToken",
+	DeleteGroups:                "DeleteGroups",
+	ElectLeaders:                "ElectLeaders",
+	IncrementalAlterConfigs:     "IncrementalAlterConfigs",
+	AlterPartitionReassignments: "AlterPartitionReassignments",
+	ListPartitionReassignments:  "ListPartitionReassignments",
+	OffsetDelete:                "OffsetDelete",
+	DescribeClientQuotas:        "DescribeClientQuotas",
+	AlterClientQuotas:           "AlterClientQuotas",
+}
+
+type messageType struct {
+	version  int16
+	flexible bool
+	gotype   reflect.Type
+	decode   decodeFunc
+	encode   encodeFunc
+}
+
+func (t *messageType) new() Message {
+	return reflect.New(t.gotype).Interface().(Message)
+}
+
+type apiType struct {
+	requests  []messageType
+	responses []messageType
+}
+
+func (t apiType) minVersion() int16 {
+	if len(t.requests) == 0 {
+		return 0
+	}
+	return t.requests[0].version
+}
+
+func (t apiType) maxVersion() int16 {
+	if len(t.requests) == 0 {
+		return 0
+	}
+	return t.requests[len(t.requests)-1].version
+}
+
+var apiTypes [numApis]apiType
+
+// Register is automatically called when sub-packages are imported, to install
+// a new pair of request/response message types.
+func Register(req, res Message) {
+	k1 := req.ApiKey()
+	k2 := res.ApiKey()
+
+	if k1 != k2 {
+		panic(fmt.Sprintf("[%T/%T]: request and response API keys mismatch: %d != %d", req, res, k1, k2))
+	}
+
+	apiTypes[k1] = apiType{
+		requests:  typesOf(req),
+		responses: typesOf(res),
+	}
+}
+
+func typesOf(v interface{}) []messageType {
+	return makeTypes(reflect.TypeOf(v).Elem())
+}
+
+func makeTypes(t reflect.Type) []messageType {
+	minVersion := int16(-1)
+	maxVersion := int16(-1)
+
+	// All future versions will be flexible (according to the spec), so we
+	// don't need to worry about maxes here.
+	minFlexibleVersion := int16(-1)
+
+	forEachStructField(t, func(_ reflect.Type, _ index, tag string) {
+		forEachStructTag(tag, func(tag structTag) bool {
+			if minVersion < 0 || tag.MinVersion < minVersion {
+				minVersion = tag.MinVersion
+			}
+			if maxVersion < 0 || tag.MaxVersion > maxVersion {
+				maxVersion = tag.MaxVersion
+			}
+			if tag.TagID > -2 && (minFlexibleVersion < 0 || tag.MinVersion < minFlexibleVersion) {
+				minFlexibleVersion = tag.MinVersion
+			}
+			return true
+		})
+	})
+
+	types := make([]messageType, 0, (maxVersion-minVersion)+1)
+
+	for v := minVersion; v <= maxVersion; v++ {
+		flexible := minFlexibleVersion >= 0 && v >= minFlexibleVersion
+
+		types = append(types, messageType{
+			version:  v,
+			gotype:   t,
+			flexible: flexible,
+			decode:   decodeFuncOf(t, v, flexible, structTag{}),
+			encode:   encodeFuncOf(t, v, flexible, structTag{}),
+		})
+	}
+
+	return types
+}
+
+type structTag struct {
+	MinVersion int16
+	MaxVersion int16
+	Compact    bool
+	Nullable   bool
+	TagID      int
+}
+
+func forEachStructTag(tag string, do func(structTag) bool) {
+	if tag == "-" {
+		return // special case to ignore the field
+	}
+
+	forEach(tag, '|', func(s string) bool {
+		tag := structTag{
+			MinVersion: -1,
+			MaxVersion: -1,
+
+			// Legitimate tag IDs can start at 0. We use -1 as a placeholder to indicate
+			// that the message type is flexible, so that leaves -2 as the default for
+			// indicating that there is no tag ID and the message is not flexible.
+			TagID: -2,
+		}
+
+		var err error
+		forEach(s, ',', func(s string) bool {
+			switch {
+			case strings.HasPrefix(s, "min="):
+				tag.MinVersion, err = parseVersion(s[4:])
+			case strings.HasPrefix(s, "max="):
+				tag.MaxVersion, err = parseVersion(s[4:])
+			case s == "tag":
+				tag.TagID = -1
+			case strings.HasPrefix(s, "tag="):
+				tag.TagID, err = strconv.Atoi(s[4:])
+			case s == "compact":
+				tag.Compact = true
+			case s == "nullable":
+				tag.Nullable = true
+			default:
+				err = fmt.Errorf("unrecognized option: %q", s)
+			}
+			return err == nil
+		})
+
+		if err != nil {
+			panic(fmt.Errorf("malformed struct tag: %w", err))
+		}
+
+		if tag.MinVersion < 0 && tag.MaxVersion >= 0 {
+			panic(fmt.Errorf("missing minimum version in struct tag: %q", s))
+		}
+
+		if tag.MaxVersion < 0 && tag.MinVersion >= 0 {
+			panic(fmt.Errorf("missing maximum version in struct tag: %q", s))
+		}
+
+		if tag.MinVersion > tag.MaxVersion {
+			panic(fmt.Errorf("invalid version range in struct tag: %q", s))
+		}
+
+		return do(tag)
+	})
+}
+
+func forEach(s string, sep byte, do func(string) bool) bool {
+	for len(s) != 0 {
+		p := ""
+		i := strings.IndexByte(s, sep)
+		if i < 0 {
+			p, s = s, ""
+		} else {
+			p, s = s[:i], s[i+1:]
+		}
+		if !do(p) {
+			return false
+		}
+	}
+	return true
+}
+
+func forEachStructField(t reflect.Type, do func(reflect.Type, index, string)) {
+	for i, n := 0, t.NumField(); i < n; i++ {
+		f := t.Field(i)
+
+		if f.PkgPath != "" && f.Name != "_" {
+			continue
+		}
+
+		kafkaTag, ok := f.Tag.Lookup("kafka")
+		if !ok {
+			kafkaTag = "|"
+		}
+
+		do(f.Type, indexOf(f), kafkaTag)
+	}
+}
+
+func parseVersion(s string) (int16, error) {
+	if !strings.HasPrefix(s, "v") {
+		return 0, fmt.Errorf("invalid version number: %q", s)
+	}
+	i, err := strconv.ParseInt(s[1:], 10, 16)
+	if err != nil {
+		return 0, fmt.Errorf("invalid version number: %q: %w", s, err)
+	}
+	if i < 0 {
+		return 0, fmt.Errorf("invalid negative version number: %q", s)
+	}
+	return int16(i), nil
+}
+
+func dontExpectEOF(err error) error {
+	if err != nil {
+		if errors.Is(err, io.EOF) {
+			return io.ErrUnexpectedEOF
+		}
+
+		return err
+	}
+
+	return nil
+}
+
+type Broker struct {
+	Rack string
+	Host string
+	Port int32
+	ID   int32
+}
+
+func (b Broker) String() string {
+	return net.JoinHostPort(b.Host, itoa(b.Port))
+}
+
+func (b Broker) Format(w fmt.State, v rune) {
+	switch v {
+	case 'd':
+		io.WriteString(w, itoa(b.ID))
+	case 's':
+		io.WriteString(w, b.String())
+	case 'v':
+		io.WriteString(w, itoa(b.ID))
+		io.WriteString(w, " ")
+		io.WriteString(w, b.String())
+		if b.Rack != "" {
+			io.WriteString(w, " ")
+			io.WriteString(w, b.Rack)
+		}
+	}
+}
+
+func itoa(i int32) string {
+	return strconv.Itoa(int(i))
+}
+
+type Topic struct {
+	Name       string
+	Error      int16
+	Partitions map[int32]Partition
+}
+
+type Partition struct {
+	ID       int32
+	Error    int16
+	Leader   int32
+	Replicas []int32
+	ISR      []int32
+	Offline  []int32
+}
+
+// RawExchanger is an extension to the Message interface to allow messages
+// to control the request response cycle for the message. This is currently
+// only used to facilitate v0 SASL Authenticate requests being written in
+// a non-standard fashion when the SASL Handshake was done at v0 but not
+// when done at v1.
+type RawExchanger interface {
+	// Required should return true when a RawExchange is needed.
+	// The passed-in versions are the negotiated versions for the connection
+	// performing the request.
+ Required(versions map[ApiKey]int16) bool + // RawExchange is given the raw connection to the broker and the Message + // is responsible for writing itself to the connection as well as reading + // the response. + RawExchange(rw io.ReadWriter) (Message, error) +} + +// BrokerMessage is an extension of the Message interface implemented by some +// request types to customize the broker assignment logic. +type BrokerMessage interface { + // Given a representation of the kafka cluster state as argument, returns + // the broker that the message should be routed to. + Broker(Cluster) (Broker, error) +} + +// GroupMessage is an extension of the Message interface implemented by some +// request types to inform the program that they should be routed to a group +// coordinator. +type GroupMessage interface { + // Returns the group configured on the message. + Group() string +} + +// TransactionalMessage is an extension of the Message interface implemented by some +// request types to inform the program that they should be routed to a transaction +// coordinator. +type TransactionalMessage interface { + // Returns the transactional id configured on the message. + Transaction() string +} + +// PreparedMessage is an extension of the Message interface implemented by some +// request types which may need to run some pre-processing on their state before +// being sent. +type PreparedMessage interface { + // Prepares the message before being sent to a kafka broker using the API + // version passed as argument. + Prepare(apiVersion int16) +} + +// Splitter is an interface implemented by messages that can be split into +// multiple requests and have their results merged back by a Merger. +type Splitter interface { + // For a given cluster layout, returns the list of messages constructed + // from the receiver for each requests that should be sent to the cluster. + // The second return value is a Merger which can be used to merge back the + // results of each request into a single message (or an error). + Split(Cluster) ([]Message, Merger, error) +} + +// Merger is an interface implemented by messages which can merge multiple +// results into one response. +type Merger interface { + // Given a list of message and associated results, merge them back into a + // response (or an error). The results must be either Message or error + // values, other types should trigger a panic. + Merge(messages []Message, results []interface{}) (Message, error) +} + +// Result converts r to a Message or an error, or panics if r could not be +// converted to these types. +func Result(r interface{}) (Message, error) { + switch v := r.(type) { + case Message: + return v, nil + case error: + return nil, v + default: + panic(fmt.Errorf("BUG: result must be a message or an error but not %T", v)) + } +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/record.go b/vendor/github.com/segmentio/kafka-go/protocol/record.go new file mode 100644 index 00000000000..84594868b88 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/record.go @@ -0,0 +1,314 @@ +package protocol + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "time" + + "github.com/segmentio/kafka-go/compress" +) + +// Attributes is a bitset representing special attributes set on records. 
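+//
+// Editorial sketch (illustrative, not upstream text): the low 3 bits select
+// the compression codec, bits 4 and 5 carry the transactional/control flags:
+//
+//	a := Gzip | Transactional
+//	a.Compression()   // compress.Gzip
+//	a.Transactional() // true
+//	a.Control()       // false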
+type Attributes int16
+
+const (
+	Gzip          Attributes = Attributes(compress.Gzip)   // 1
+	Snappy        Attributes = Attributes(compress.Snappy) // 2
+	Lz4           Attributes = Attributes(compress.Lz4)    // 3
+	Zstd          Attributes = Attributes(compress.Zstd)   // 4
+	Transactional Attributes = 1 << 4
+	Control       Attributes = 1 << 5
+)
+
+func (a Attributes) Compression() compress.Compression {
+	return compress.Compression(a & 7)
+}
+
+func (a Attributes) Transactional() bool {
+	return (a & Transactional) != 0
+}
+
+func (a Attributes) Control() bool {
+	return (a & Control) != 0
+}
+
+func (a Attributes) String() string {
+	s := a.Compression().String()
+	if a.Transactional() {
+		s += "+transactional"
+	}
+	if a.Control() {
+		s += "+control"
+	}
+	return s
+}
+
+// Header represents a single entry in a list of record headers.
+type Header struct {
+	Key   string
+	Value []byte
+}
+
+// Record represents a single kafka record.
+//
+// Record values are not safe to use concurrently from multiple goroutines.
+type Record struct {
+	// The offset at which the record exists in a topic partition. This value
+	// is ignored in produce requests.
+	Offset int64
+
+	// The time of the record. This value may be omitted in produce requests
+	// to let kafka set the time when it saves the record.
+	Time time.Time
+
+	// A byte sequence containing the key of this record. The sequence may be
+	// nil to indicate that the record has no key. If the record is part of a
+	// RecordSet, the content of the key must remain valid at least until the
+	// record set is closed (or until the key is closed).
+	Key Bytes
+
+	// A byte sequence containing the value of this record. The sequence may
+	// be nil to indicate that the record has no value. If the record is part
+	// of a RecordSet, the content of the value must remain valid at least
+	// until the record set is closed (or until the value is closed).
+	Value Bytes
+
+	// The list of headers associated with this record. The slice may be
+	// reused across calls; the program should use it as an immutable value.
+	Headers []Header
+}
+
+// RecordSet represents a sequence of records in Produce requests and Fetch
+// responses. All v0, v1, and v2 formats are supported.
+type RecordSet struct {
+	// The message version that this record set will be represented as, valid
+	// values are 1 or 2.
+	//
+	// When reading, this is the value of the highest version used in the
+	// batches that compose the record set.
+	//
+	// When writing, this value dictates the format that the records will be
+	// encoded in.
+	Version int8
+
+	// Attributes set on the record set.
+	//
+	// When reading, the attributes are the combination of all attributes in
+	// the batches that compose the record set.
+	//
+	// When writing, the attributes apply to the whole sequence of records in
+	// the set.
+	Attributes Attributes
+
+	// A reader exposing the sequence of records.
+	//
+	// When reading a RecordSet from an io.Reader, the Records field will be a
+	// *RecordStream. If the program needs to access the details of the batches
+	// that compose the stream, it may use type assertions to access the
+	// underlying types of each batch.
+	Records RecordReader
+}
+
+// bufferedReader is an interface implemented by types like bufio.Reader, which
+// we use to optimize prefix reads by accessing the internal buffer directly
+// through calls to Peek.
+type bufferedReader interface {
+	Discard(int) (int, error)
+	Peek(int) ([]byte, error)
+}
+
+// bytesBuffer is an interface implemented by types like bytes.Buffer, which we
+// use to optimize prefix reads by accessing the internal buffer directly
+// through calls to Bytes.
+type bytesBuffer interface {
+	Bytes() []byte
+}
+
+// magicByteOffset is the position of the magic byte in all versions of record
+// sets in the kafka protocol.
+const magicByteOffset = 16
+
+// ReadFrom reads the representation of a record set from r into rs, returning
+// the number of bytes consumed from r, and a non-nil error if the record set
+// could not be read.
+func (rs *RecordSet) ReadFrom(r io.Reader) (int64, error) {
+	d, _ := r.(*decoder)
+	if d == nil {
+		d = &decoder{
+			reader: r,
+			remain: 4,
+		}
+	}
+
+	*rs = RecordSet{}
+	limit := d.remain
+	size := d.readInt32()
+
+	if d.err != nil {
+		return int64(limit - d.remain), d.err
+	}
+
+	if size <= 0 {
+		return 4, nil
+	}
+
+	stream := &RecordStream{
+		Records: make([]RecordReader, 0, 4),
+	}
+
+	var err error
+	d.remain = int(size)
+
+	for d.remain > 0 && err == nil {
+		var version byte
+
+		if d.remain < (magicByteOffset + 1) {
+			if len(stream.Records) != 0 {
+				break
+			}
+			return 4, fmt.Errorf("impossible record set shorter than %d bytes", magicByteOffset+1)
+		}
+
+		switch r := d.reader.(type) {
+		case bufferedReader:
+			b, err := r.Peek(magicByteOffset + 1)
+			if err != nil {
+				n, _ := r.Discard(len(b))
+				return 4 + int64(n), dontExpectEOF(err)
+			}
+			version = b[magicByteOffset]
+		case bytesBuffer:
+			version = r.Bytes()[magicByteOffset]
+		default:
+			b := make([]byte, magicByteOffset+1)
+			if n, err := io.ReadFull(d.reader, b); err != nil {
+				return 4 + int64(n), dontExpectEOF(err)
+			}
+			version = b[magicByteOffset]
+			// Reconstruct the prefix that we had to read to determine the version
+			// of the record set from the magic byte.
+			//
+			// Technically this may recursively stack readers when consuming all
+			// items of the batch, which could hurt performance. In practice this
+			// path should not be taken though, since the decoder would read from
+			// a *bufio.Reader which implements the bufferedReader interface.
+			d.reader = io.MultiReader(bytes.NewReader(b), d.reader)
+		}
+
+		var tmp RecordSet
+		switch version {
+		case 0, 1:
+			err = tmp.readFromVersion1(d)
+		case 2:
+			err = tmp.readFromVersion2(d)
+		default:
+			err = fmt.Errorf("unsupported message version %d for message of size %d", version, size)
+		}
+
+		if tmp.Version > rs.Version {
+			rs.Version = tmp.Version
+		}
+
+		rs.Attributes |= tmp.Attributes
+
+		if tmp.Records != nil {
+			stream.Records = append(stream.Records, tmp.Records)
+		}
+	}
+
+	if len(stream.Records) != 0 {
+		rs.Records = stream
+		// Ignore errors if we've successfully read records, so the
+		// program can keep making progress.
+		err = nil
+	}
+
+	d.discardAll()
+	rn := 4 + (int(size) - d.remain)
+	d.remain = limit - rn
+	return int64(rn), err
+}
+
+// WriteTo writes the representation of rs into w. The value of rs.Version
+// dictates which format the record set will be represented as.
+//
+// The error will be ErrNoRecord if rs contained no records.
+//
+// Note: since this package is only compatible with kafka 0.10 and above, the
+// method never produces messages in version 0. If rs.Version is zero, the
+// method defaults to producing messages in version 1.
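+//
+// Editorial sketch (illustrative, not upstream text): WriteTo and ReadFrom
+// round-trip a record set through any buffer; assuming records is a []Record:
+//
+//	rs := RecordSet{Version: 2, Records: NewRecordReader(records...)}
+//	buf := new(bytes.Buffer)
+//	if _, err := rs.WriteTo(buf); err != nil { /* handle error */ }
+//	if _, err := rs.ReadFrom(buf); err != nil { /* handle error */ }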
+func (rs *RecordSet) WriteTo(w io.Writer) (int64, error) { + if rs.Records == nil { + return 0, ErrNoRecord + } + + // This optimization avoids rendering the record set in an intermediary + // buffer when the writer is already a pageBuffer, which is a common case + // due to the way WriteRequest and WriteResponse are implemented. + buffer, _ := w.(*pageBuffer) + bufferOffset := int64(0) + + if buffer != nil { + bufferOffset = buffer.Size() + } else { + buffer = newPageBuffer() + defer buffer.unref() + } + + size := packUint32(0) + buffer.Write(size[:]) // size placeholder + + var err error + switch rs.Version { + case 0, 1: + err = rs.writeToVersion1(buffer, bufferOffset+4) + case 2: + err = rs.writeToVersion2(buffer, bufferOffset+4) + default: + err = fmt.Errorf("unsupported record set version %d", rs.Version) + } + if err != nil { + return 0, err + } + + n := buffer.Size() - bufferOffset + if n == 0 { + size = packUint32(^uint32(0)) + } else { + size = packUint32(uint32(n) - 4) + } + buffer.WriteAt(size[:], bufferOffset) + + // This condition indicates that the output writer received by `WriteTo` was + // not a *pageBuffer, in which case we need to flush the buffered records + // data into it. + if buffer != w { + return buffer.WriteTo(w) + } + + return n, nil +} + +func makeTime(t int64) time.Time { + return time.Unix(t/1000, (t%1000)*int64(time.Millisecond)) +} + +func timestamp(t time.Time) int64 { + if t.IsZero() { + return 0 + } + return t.UnixNano() / int64(time.Millisecond) +} + +func packUint32(u uint32) (b [4]byte) { + binary.BigEndian.PutUint32(b[:], u) + return +} + +func packUint64(u uint64) (b [8]byte) { + binary.BigEndian.PutUint64(b[:], u) + return +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/record_batch.go b/vendor/github.com/segmentio/kafka-go/protocol/record_batch.go new file mode 100644 index 00000000000..eca5399331d --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/record_batch.go @@ -0,0 +1,358 @@ +package protocol + +import ( + "errors" + "io" + "time" +) + +// RecordReader is an interface representing a sequence of records. Record sets +// are used in both produce and fetch requests to represent the sequence of +// records that are sent to or receive from kafka brokers. +// +// RecordSet values are not safe to use concurrently from multiple goroutines. +type RecordReader interface { + // Returns the next record in the set, or io.EOF if the end of the sequence + // has been reached. + // + // The returned Record is guaranteed to be valid until the next call to + // ReadRecord. If the program needs to retain the Record value it must make + // a copy. + ReadRecord() (*Record, error) +} + +// NewRecordReader constructs a reader exposing the records passed as arguments. +func NewRecordReader(records ...Record) RecordReader { + switch len(records) { + case 0: + return emptyRecordReader{} + default: + r := &recordReader{records: make([]Record, len(records))} + copy(r.records, records) + return r + } +} + +// MultiRecordReader merges multiple record batches into one. 
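+//
+// Editorial sketch (illustrative, not upstream text; batch1 and batch2 are
+// hypothetical RecordReader values): readers drain in order, and io.EOF only
+// surfaces once every underlying reader is exhausted:
+//
+//	r := MultiRecordReader(batch1, batch2)
+//	for {
+//		rec, err := r.ReadRecord()
+//		if err != nil {
+//			break // io.EOF after batch1 then batch2 are drained
+//		}
+//		_ = rec // only valid until the next call to ReadRecord
+//	}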
+func MultiRecordReader(batches ...RecordReader) RecordReader {
+	switch len(batches) {
+	case 0:
+		return emptyRecordReader{}
+	case 1:
+		return batches[0]
+	default:
+		m := &multiRecordReader{batches: make([]RecordReader, len(batches))}
+		copy(m.batches, batches)
+		return m
+	}
+}
+
+func forEachRecord(r RecordReader, f func(int, *Record) error) error {
+	for i := 0; ; i++ {
+		rec, err := r.ReadRecord()
+
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				err = nil
+			}
+			return err
+		}
+
+		if err := handleRecord(i, rec, f); err != nil {
+			return err
+		}
+	}
+}
+
+func handleRecord(i int, r *Record, f func(int, *Record) error) error {
+	if r.Key != nil {
+		defer r.Key.Close()
+	}
+	if r.Value != nil {
+		defer r.Value.Close()
+	}
+	return f(i, r)
+}
+
+type recordReader struct {
+	records []Record
+	index   int
+}
+
+func (r *recordReader) ReadRecord() (*Record, error) {
+	if i := r.index; i >= 0 && i < len(r.records) {
+		r.index++
+		return &r.records[i], nil
+	}
+	return nil, io.EOF
+}
+
+type multiRecordReader struct {
+	batches []RecordReader
+	index   int
+}
+
+func (m *multiRecordReader) ReadRecord() (*Record, error) {
+	for {
+		if m.index == len(m.batches) {
+			return nil, io.EOF
+		}
+		r, err := m.batches[m.index].ReadRecord()
+		if err == nil {
+			return r, nil
+		}
+		if !errors.Is(err, io.EOF) {
+			return nil, err
+		}
+		m.index++
+	}
+}
+
+// optimizedRecordReader is an implementation of a RecordReader which exposes a
+// sequence of records decoded ahead of time.
+type optimizedRecordReader struct {
+	records []optimizedRecord
+	index   int
+	buffer  Record
+	headers [][]Header
+}
+
+func (r *optimizedRecordReader) ReadRecord() (*Record, error) {
+	if i := r.index; i >= 0 && i < len(r.records) {
+		rec := &r.records[i]
+		r.index++
+		r.buffer = Record{
+			Offset: rec.offset,
+			Time:   rec.time(),
+			Key:    rec.key(),
+			Value:  rec.value(),
+		}
+		if i < len(r.headers) {
+			r.buffer.Headers = r.headers[i]
+		}
+		return &r.buffer, nil
+	}
+	return nil, io.EOF
+}
+
+type optimizedRecord struct {
+	offset    int64
+	timestamp int64
+	keyRef    *pageRef
+	valueRef  *pageRef
+}
+
+func (r *optimizedRecord) time() time.Time {
+	return makeTime(r.timestamp)
+}
+
+func (r *optimizedRecord) key() Bytes {
+	return makeBytes(r.keyRef)
+}
+
+func (r *optimizedRecord) value() Bytes {
+	return makeBytes(r.valueRef)
+}
+
+func makeBytes(ref *pageRef) Bytes {
+	if ref == nil {
+		return nil
+	}
+	return ref
+}
+
+type emptyRecordReader struct{}
+
+func (emptyRecordReader) ReadRecord() (*Record, error) { return nil, io.EOF }
+
+// ControlRecord represents a record read from a control batch.
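+//
+// The 4-byte key of such a record encodes the control record version and type
+// as two big-endian int16 values; per the kafka protocol, type 0 marks an
+// aborted transaction and type 1 a committed one. A decoding sketch (r is a
+// hypothetical *Record coming from a control batch):
+//
+//	cr, err := ReadControlRecord(r)
+//	if err == nil && cr.Type == 1 {
+//		// transaction commit marker
+//	}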
+type ControlRecord struct {
+	Offset  int64
+	Time    time.Time
+	Version int16
+	Type    int16
+	Data    []byte
+	Headers []Header
+}
+
+func ReadControlRecord(r *Record) (*ControlRecord, error) {
+	if r.Key != nil {
+		defer r.Key.Close()
+	}
+	if r.Value != nil {
+		defer r.Value.Close()
+	}
+
+	k, err := ReadAll(r.Key)
+	if err != nil {
+		return nil, err
+	}
+	if k == nil {
+		return nil, Error("invalid control record with nil key")
+	}
+	if len(k) != 4 {
+		return nil, Errorf("invalid control record with key of size %d", len(k))
+	}
+
+	v, err := ReadAll(r.Value)
+	if err != nil {
+		return nil, err
+	}
+
+	c := &ControlRecord{
+		Offset:  r.Offset,
+		Time:    r.Time,
+		Version: readInt16(k[:2]),
+		Type:    readInt16(k[2:]),
+		Data:    v,
+		Headers: r.Headers,
+	}
+
+	return c, nil
+}
+
+func (cr *ControlRecord) Key() Bytes {
+	k := make([]byte, 4)
+	writeInt16(k[:2], cr.Version)
+	writeInt16(k[2:], cr.Type)
+	return NewBytes(k)
+}
+
+func (cr *ControlRecord) Value() Bytes {
+	return NewBytes(cr.Data)
+}
+
+func (cr *ControlRecord) Record() Record {
+	return Record{
+		Offset:  cr.Offset,
+		Time:    cr.Time,
+		Key:     cr.Key(),
+		Value:   cr.Value(),
+		Headers: cr.Headers,
+	}
+}
+
+// ControlBatch is an implementation of the RecordReader interface representing
+// control batches returned by kafka brokers.
+type ControlBatch struct {
+	Attributes           Attributes
+	PartitionLeaderEpoch int32
+	BaseOffset           int64
+	ProducerID           int64
+	ProducerEpoch        int16
+	BaseSequence         int32
+	Records              RecordReader
+}
+
+// NewControlBatch constructs a control batch from the list of records passed as
+// arguments.
+func NewControlBatch(records ...ControlRecord) *ControlBatch {
+	rawRecords := make([]Record, len(records))
+	for i, cr := range records {
+		rawRecords[i] = cr.Record()
+	}
+	return &ControlBatch{
+		Records: NewRecordReader(rawRecords...),
+	}
+}
+
+func (c *ControlBatch) ReadRecord() (*Record, error) {
+	return c.Records.ReadRecord()
+}
+
+func (c *ControlBatch) ReadControlRecord() (*ControlRecord, error) {
+	r, err := c.ReadRecord()
+	if err != nil {
+		return nil, err
+	}
+	if r.Key != nil {
+		defer r.Key.Close()
+	}
+	if r.Value != nil {
+		defer r.Value.Close()
+	}
+	return ReadControlRecord(r)
+}
+
+func (c *ControlBatch) Offset() int64 {
+	return c.BaseOffset
+}
+
+func (c *ControlBatch) Version() int {
+	return 2
+}
+
+// RecordBatch is an implementation of the RecordReader interface representing
+// regular record batches (v2).
+type RecordBatch struct {
+	Attributes           Attributes
+	PartitionLeaderEpoch int32
+	BaseOffset           int64
+	ProducerID           int64
+	ProducerEpoch        int16
+	BaseSequence         int32
+	Records              RecordReader
+}
+
+func (r *RecordBatch) ReadRecord() (*Record, error) {
+	return r.Records.ReadRecord()
+}
+
+func (r *RecordBatch) Offset() int64 {
+	return r.BaseOffset
+}
+
+func (r *RecordBatch) Version() int {
+	return 2
+}
+
+// MessageSet is an implementation of the RecordReader interface representing
+// regular message sets (v1).
+type MessageSet struct {
+	Attributes Attributes
+	BaseOffset int64
+	Records    RecordReader
+}
+
+func (m *MessageSet) ReadRecord() (*Record, error) {
+	return m.Records.ReadRecord()
+}
+
+func (m *MessageSet) Offset() int64 {
+	return m.BaseOffset
+}
+
+func (m *MessageSet) Version() int {
+	return 1
+}
+
+// RecordStream is an implementation of the RecordReader interface which
+// combines multiple underlying RecordReaders and only exposes records that
+// are not from control batches.
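+//
+// For example, a stream built from a control batch followed by a regular
+// batch only yields the records of the regular batch (a sketch; both inner
+// readers are hypothetical):
+//
+//	s := &RecordStream{Records: []RecordReader{controlBatch, dataBatch}}
+//	r, err := s.ReadRecord() // first record of dataBatch, or io.EOF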
+type RecordStream struct { + Records []RecordReader + index int +} + +func (s *RecordStream) ReadRecord() (*Record, error) { + for { + if s.index < 0 || s.index >= len(s.Records) { + return nil, io.EOF + } + + if _, isControl := s.Records[s.index].(*ControlBatch); isControl { + s.index++ + continue + } + + r, err := s.Records[s.index].ReadRecord() + if err != nil { + if errors.Is(err, io.EOF) { + s.index++ + continue + } + } + + return r, err + } +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/record_v1.go b/vendor/github.com/segmentio/kafka-go/protocol/record_v1.go new file mode 100644 index 00000000000..5757e1146ae --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/record_v1.go @@ -0,0 +1,243 @@ +package protocol + +import ( + "errors" + "hash/crc32" + "io" + "math" + "time" +) + +func readMessage(b *pageBuffer, d *decoder) (attributes int8, baseOffset, timestamp int64, key, value Bytes, err error) { + md := decoder{ + reader: d, + remain: 12, + } + + baseOffset = md.readInt64() + md.remain = int(md.readInt32()) + + crc := uint32(md.readInt32()) + md.setCRC(crc32.IEEETable) + magicByte := md.readInt8() + attributes = md.readInt8() + timestamp = int64(0) + + if magicByte != 0 { + timestamp = md.readInt64() + } + + keyOffset := b.Size() + keyLength := int(md.readInt32()) + hasKey := keyLength >= 0 + if hasKey { + md.writeTo(b, keyLength) + key = b.ref(keyOffset, b.Size()) + } + + valueOffset := b.Size() + valueLength := int(md.readInt32()) + hasValue := valueLength >= 0 + if hasValue { + md.writeTo(b, valueLength) + value = b.ref(valueOffset, b.Size()) + } + + if md.crc32 != crc { + err = Errorf("crc32 checksum mismatch (computed=%d found=%d)", md.crc32, crc) + } else { + err = dontExpectEOF(md.err) + } + + return +} + +func (rs *RecordSet) readFromVersion1(d *decoder) error { + var records RecordReader + + b := newPageBuffer() + defer b.unref() + + attributes, baseOffset, timestamp, key, value, err := readMessage(b, d) + if err != nil { + return err + } + + if compression := Attributes(attributes).Compression(); compression == 0 { + records = &message{ + Record: Record{ + Offset: baseOffset, + Time: makeTime(timestamp), + Key: key, + Value: value, + }, + } + } else { + // Can we have a non-nil key when reading a compressed message? + if key != nil { + key.Close() + } + if value == nil { + records = emptyRecordReader{} + } else { + defer value.Close() + + codec := compression.Codec() + if codec == nil { + return Errorf("unsupported compression codec: %d", compression) + } + decompressor := codec.NewReader(value) + defer decompressor.Close() + + b := newPageBuffer() + defer b.unref() + + d := &decoder{ + reader: decompressor, + remain: math.MaxInt32, + } + + r := &recordReader{ + records: make([]Record, 0, 32), + } + + for !d.done() { + _, offset, timestamp, key, value, err := readMessage(b, d) + if err != nil { + if errors.Is(err, io.ErrUnexpectedEOF) { + break + } + for _, rec := range r.records { + closeBytes(rec.Key) + closeBytes(rec.Value) + } + return err + } + r.records = append(r.records, Record{ + Offset: offset, + Time: makeTime(timestamp), + Key: key, + Value: value, + }) + } + + if baseOffset != 0 { + // https://kafka.apache.org/documentation/#messageset + // + // In version 1, to avoid server side re-compression, only the + // wrapper message will be assigned an offset. The inner messages + // will have relative offsets. 
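+				// For example, a wrapper message at offset 100 carrying three
+				// inner messages with relative offsets 0, 1 and 2 yields
+				// absolute offsets 98, 99 and 100.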
The absolute offset can be computed + // using the offset from the outer message, which corresponds to the + // offset assigned to the last inner message. + lastRelativeOffset := int64(len(r.records)) - 1 + + for i := range r.records { + r.records[i].Offset = baseOffset - (lastRelativeOffset - r.records[i].Offset) + } + } + + records = r + } + } + + *rs = RecordSet{ + Version: 1, + Attributes: Attributes(attributes), + Records: records, + } + + return nil +} + +func (rs *RecordSet) writeToVersion1(buffer *pageBuffer, bufferOffset int64) error { + attributes := rs.Attributes + records := rs.Records + + if compression := attributes.Compression(); compression != 0 { + if codec := compression.Codec(); codec != nil { + // In the message format version 1, compression is achieved by + // compressing the value of a message which recursively contains + // the representation of the compressed message set. + subset := *rs + subset.Attributes &= ^7 // erase compression + + if err := subset.writeToVersion1(buffer, bufferOffset); err != nil { + return err + } + + compressed := newPageBuffer() + defer compressed.unref() + + compressor := codec.NewWriter(compressed) + defer compressor.Close() + + var err error + buffer.pages.scan(bufferOffset, buffer.Size(), func(b []byte) bool { + _, err = compressor.Write(b) + return err == nil + }) + if err != nil { + return err + } + if err := compressor.Close(); err != nil { + return err + } + + buffer.Truncate(int(bufferOffset)) + + records = &message{ + Record: Record{ + Value: compressed, + }, + } + } + } + + e := encoder{writer: buffer} + currentTimestamp := timestamp(time.Now()) + + return forEachRecord(records, func(i int, r *Record) error { + t := timestamp(r.Time) + if t == 0 { + t = currentTimestamp + } + + messageOffset := buffer.Size() + e.writeInt64(int64(i)) + e.writeInt32(0) // message size placeholder + e.writeInt32(0) // crc32 placeholder + e.setCRC(crc32.IEEETable) + e.writeInt8(1) // magic byte: version 1 + e.writeInt8(int8(attributes)) + e.writeInt64(t) + + if err := e.writeNullBytesFrom(r.Key); err != nil { + return err + } + + if err := e.writeNullBytesFrom(r.Value); err != nil { + return err + } + + b0 := packUint32(uint32(buffer.Size() - (messageOffset + 12))) + b1 := packUint32(e.crc32) + + buffer.WriteAt(b0[:], messageOffset+8) + buffer.WriteAt(b1[:], messageOffset+12) + e.setCRC(nil) + return nil + }) +} + +type message struct { + Record Record + read bool +} + +func (m *message) ReadRecord() (*Record, error) { + if m.read { + return nil, io.EOF + } + m.read = true + return &m.Record, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/record_v2.go b/vendor/github.com/segmentio/kafka-go/protocol/record_v2.go new file mode 100644 index 00000000000..366ec4bff15 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/record_v2.go @@ -0,0 +1,315 @@ +package protocol + +import ( + "fmt" + "hash/crc32" + "io" + "time" +) + +func (rs *RecordSet) readFromVersion2(d *decoder) error { + baseOffset := d.readInt64() + batchLength := d.readInt32() + + if int(batchLength) > d.remain || d.err != nil { + d.discardAll() + return nil + } + + dec := &decoder{ + reader: d, + remain: int(batchLength), + } + + partitionLeaderEpoch := dec.readInt32() + magicByte := dec.readInt8() + crc := dec.readInt32() + + dec.setCRC(crc32.MakeTable(crc32.Castagnoli)) + + attributes := dec.readInt16() + lastOffsetDelta := dec.readInt32() + firstTimestamp := dec.readInt64() + maxTimestamp := dec.readInt64() + producerID := 
dec.readInt64() + producerEpoch := dec.readInt16() + baseSequence := dec.readInt32() + numRecords := dec.readInt32() + reader := io.Reader(dec) + + // unused + _ = lastOffsetDelta + _ = maxTimestamp + + if compression := Attributes(attributes).Compression(); compression != 0 { + codec := compression.Codec() + if codec == nil { + return fmt.Errorf("unsupported compression codec (%d)", compression) + } + decompressor := codec.NewReader(reader) + defer decompressor.Close() + reader = decompressor + } + + buffer := newPageBuffer() + defer buffer.unref() + + _, err := buffer.ReadFrom(reader) + if err != nil { + return err + } + if dec.crc32 != uint32(crc) { + return fmt.Errorf("crc32 checksum mismatch (computed=%d found=%d)", dec.crc32, uint32(crc)) + } + + recordsLength := buffer.Len() + dec.reader = buffer + dec.remain = recordsLength + + records := make([]optimizedRecord, numRecords) + // These are two lazy allocators that will be used to optimize allocation of + // page references for keys and values. + // + // By default, no memory is allocated and on first use, numRecords page refs + // are allocated in a contiguous memory space, and the allocators return + // pointers into those arrays for each page ref that get requested. + // + // The reasoning is that kafka partitions typically have records of a single + // form, which either have no keys, no values, or both keys and values. + // Using lazy allocators adapts nicely to these patterns to only allocate + // the memory that is needed by the program, while still reducing the number + // of malloc calls made by the program. + // + // Using a single allocator for both keys and values keeps related values + // close by in memory, making access to the records more friendly to CPU + // caches. + alloc := pageRefAllocator{size: int(numRecords)} + // Following the same reasoning that kafka partitions will typically have + // records with repeating formats, we expect to either find records with + // no headers, or records which always contain headers. + // + // To reduce the memory footprint when records have no headers, the Header + // slices are lazily allocated in a separate array. + headers := ([][]Header)(nil) + + for i := range records { + r := &records[i] + _ = dec.readVarInt() // record length (unused) + _ = dec.readInt8() // record attributes (unused) + timestampDelta := dec.readVarInt() + offsetDelta := dec.readVarInt() + + r.offset = baseOffset + offsetDelta + r.timestamp = firstTimestamp + timestampDelta + + keyLength := dec.readVarInt() + keyOffset := int64(recordsLength - dec.remain) + if keyLength > 0 { + dec.discard(int(keyLength)) + } + + valueLength := dec.readVarInt() + valueOffset := int64(recordsLength - dec.remain) + if valueLength > 0 { + dec.discard(int(valueLength)) + } + + if numHeaders := dec.readVarInt(); numHeaders > 0 { + if headers == nil { + headers = make([][]Header, numRecords) + } + + h := make([]Header, numHeaders) + + for i := range h { + h[i] = Header{ + Key: dec.readVarString(), + Value: dec.readVarBytes(), + } + } + + headers[i] = h + } + + if dec.err != nil { + records = records[:i] + break + } + + if keyLength >= 0 { + r.keyRef = alloc.newPageRef() + buffer.refTo(r.keyRef, keyOffset, keyOffset+keyLength) + } + + if valueLength >= 0 { + r.valueRef = alloc.newPageRef() + buffer.refTo(r.valueRef, valueOffset, valueOffset+valueLength) + } + } + + // Note: it's unclear whether kafka 0.11+ still truncates the responses, + // all attempts I made at constructing a test to trigger a truncation have + // failed. 
I kept this code here as a safeguard but it may never execute.
+	if dec.err != nil && len(records) == 0 {
+		return dec.err
+	}
+
+	*rs = RecordSet{
+		Version:    magicByte,
+		Attributes: Attributes(attributes),
+		Records: &optimizedRecordReader{
+			records: records,
+			headers: headers,
+		},
+	}
+
+	if rs.Attributes.Control() {
+		rs.Records = &ControlBatch{
+			Attributes:           rs.Attributes,
+			PartitionLeaderEpoch: partitionLeaderEpoch,
+			BaseOffset:           baseOffset,
+			ProducerID:           producerID,
+			ProducerEpoch:        producerEpoch,
+			BaseSequence:         baseSequence,
+			Records:              rs.Records,
+		}
+	} else {
+		rs.Records = &RecordBatch{
+			Attributes:           rs.Attributes,
+			PartitionLeaderEpoch: partitionLeaderEpoch,
+			BaseOffset:           baseOffset,
+			ProducerID:           producerID,
+			ProducerEpoch:        producerEpoch,
+			BaseSequence:         baseSequence,
+			Records:              rs.Records,
+		}
+	}
+
+	return nil
+}
+
+func (rs *RecordSet) writeToVersion2(buffer *pageBuffer, bufferOffset int64) error {
+	records := rs.Records
+	numRecords := int32(0)
+
+	e := &encoder{writer: buffer}
+	e.writeInt64(0)                    // base offset                         |  0 +8
+	e.writeInt32(0)                    // placeholder for record batch length |  8 +4
+	e.writeInt32(-1)                   // partition leader epoch              | 12 +4
+	e.writeInt8(2)                     // magic byte                          | 16 +1
+	e.writeInt32(0)                    // placeholder for crc32 checksum      | 17 +4
+	e.writeInt16(int16(rs.Attributes)) // attributes                          | 21 +2
+	e.writeInt32(0)                    // placeholder for lastOffsetDelta     | 23 +4
+	e.writeInt64(0)                    // placeholder for firstTimestamp      | 27 +8
+	e.writeInt64(0)                    // placeholder for maxTimestamp        | 35 +8
+	e.writeInt64(-1)                   // producer id                         | 43 +8
+	e.writeInt16(-1)                   // producer epoch                      | 51 +2
+	e.writeInt32(-1)                   // base sequence                       | 53 +4
+	e.writeInt32(0)                    // placeholder for numRecords          | 57 +4
+
+	var compressor io.WriteCloser
+	if compression := rs.Attributes.Compression(); compression != 0 {
+		if codec := compression.Codec(); codec != nil {
+			compressor = codec.NewWriter(buffer)
+			e.writer = compressor
+		}
+	}
+
+	currentTimestamp := timestamp(time.Now())
+	lastOffsetDelta := int32(0)
+	firstTimestamp := int64(0)
+	maxTimestamp := int64(0)
+
+	err := forEachRecord(records, func(i int, r *Record) error {
+		t := timestamp(r.Time)
+		if t == 0 {
+			t = currentTimestamp
+		}
+		if i == 0 {
+			firstTimestamp = t
+		}
+		if t > maxTimestamp {
+			maxTimestamp = t
+		}
+
+		timestampDelta := t - firstTimestamp
+		offsetDelta := int64(i)
+		lastOffsetDelta = int32(offsetDelta)
+
+		length := 1 + // attributes
+			sizeOfVarInt(timestampDelta) +
+			sizeOfVarInt(offsetDelta) +
+			sizeOfVarNullBytesIface(r.Key) +
+			sizeOfVarNullBytesIface(r.Value) +
+			sizeOfVarInt(int64(len(r.Headers)))
+
+		for _, h := range r.Headers {
+			length += sizeOfVarString(h.Key) + sizeOfVarNullBytes(h.Value)
+		}
+
+		e.writeVarInt(int64(length))
+		e.writeInt8(0) // record attributes (unused)
+		e.writeVarInt(timestampDelta)
+		e.writeVarInt(offsetDelta)
+
+		if err := e.writeVarNullBytesFrom(r.Key); err != nil {
+			return err
+		}
+
+		if err := e.writeVarNullBytesFrom(r.Value); err != nil {
+			return err
+		}
+
+		e.writeVarInt(int64(len(r.Headers)))
+
+		for _, h := range r.Headers {
+			e.writeVarString(h.Key)
+			e.writeVarNullBytes(h.Value)
+		}
+
+		numRecords++
+		return nil
+	})
+
+	if err != nil {
+		return err
+	}
+
+	if compressor != nil {
+		if err := compressor.Close(); err != nil {
+			return err
+		}
+	}
+
+	if numRecords == 0 {
+		return ErrNoRecord
+	}
+
+	b2 := packUint32(uint32(lastOffsetDelta))
+	b3 := packUint64(uint64(firstTimestamp))
+	b4 := packUint64(uint64(maxTimestamp))
+	b5 := packUint32(uint32(numRecords))
+
+	buffer.WriteAt(b2[:], bufferOffset+23)
+
buffer.WriteAt(b3[:], bufferOffset+27) + buffer.WriteAt(b4[:], bufferOffset+35) + buffer.WriteAt(b5[:], bufferOffset+57) + + totalLength := buffer.Size() - bufferOffset + batchLength := totalLength - 12 + + checksum := uint32(0) + crcTable := crc32.MakeTable(crc32.Castagnoli) + + buffer.pages.scan(bufferOffset+21, bufferOffset+totalLength, func(chunk []byte) bool { + checksum = crc32.Update(checksum, crcTable, chunk) + return true + }) + + b0 := packUint32(uint32(batchLength)) + b1 := packUint32(checksum) + + buffer.WriteAt(b0[:], bufferOffset+8) + buffer.WriteAt(b1[:], bufferOffset+17) + return nil +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/reflect.go b/vendor/github.com/segmentio/kafka-go/protocol/reflect.go new file mode 100644 index 00000000000..4d664b26be2 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/reflect.go @@ -0,0 +1,102 @@ +//go:build !unsafe +// +build !unsafe + +package protocol + +import ( + "reflect" +) + +type index []int + +type _type struct{ typ reflect.Type } + +func typeOf(x interface{}) _type { + return makeType(reflect.TypeOf(x)) +} + +func elemTypeOf(x interface{}) _type { + return makeType(reflect.TypeOf(x).Elem()) +} + +func makeType(t reflect.Type) _type { + return _type{typ: t} +} + +type value struct { + val reflect.Value +} + +func nonAddressableValueOf(x interface{}) value { + return value{val: reflect.ValueOf(x)} +} + +func valueOf(x interface{}) value { + return value{val: reflect.ValueOf(x).Elem()} +} + +func (v value) bool() bool { return v.val.Bool() } + +func (v value) int8() int8 { return int8(v.int64()) } + +func (v value) int16() int16 { return int16(v.int64()) } + +func (v value) int32() int32 { return int32(v.int64()) } + +func (v value) int64() int64 { return v.val.Int() } + +func (v value) float64() float64 { return v.val.Float() } + +func (v value) string() string { return v.val.String() } + +func (v value) bytes() []byte { return v.val.Bytes() } + +func (v value) iface(t reflect.Type) interface{} { return v.val.Addr().Interface() } + +func (v value) array(t reflect.Type) array { return array(v) } + +func (v value) setBool(b bool) { v.val.SetBool(b) } + +func (v value) setInt8(i int8) { v.setInt64(int64(i)) } + +func (v value) setInt16(i int16) { v.setInt64(int64(i)) } + +func (v value) setInt32(i int32) { v.setInt64(int64(i)) } + +func (v value) setInt64(i int64) { v.val.SetInt(i) } + +func (v value) setFloat64(f float64) { v.val.SetFloat(f) } + +func (v value) setString(s string) { v.val.SetString(s) } + +func (v value) setBytes(b []byte) { v.val.SetBytes(b) } + +func (v value) setArray(a array) { + if a.val.IsValid() { + v.val.Set(a.val) + } else { + v.val.Set(reflect.Zero(v.val.Type())) + } +} + +func (v value) fieldByIndex(i index) value { + return value{val: v.val.FieldByIndex(i)} +} + +type array struct { + val reflect.Value +} + +func makeArray(t reflect.Type, n int) array { + return array{val: reflect.MakeSlice(reflect.SliceOf(t), n, n)} +} + +func (a array) index(i int) value { return value{val: a.val.Index(i)} } + +func (a array) length() int { return a.val.Len() } + +func (a array) isNil() bool { return a.val.IsNil() } + +func indexOf(s reflect.StructField) index { return index(s.Index) } + +func bytesToString(b []byte) string { return string(b) } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/reflect_unsafe.go b/vendor/github.com/segmentio/kafka-go/protocol/reflect_unsafe.go new file mode 100644 index 00000000000..9eca5060f08 --- 
/dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/reflect_unsafe.go @@ -0,0 +1,143 @@ +//go:build unsafe +// +build unsafe + +package protocol + +import ( + "reflect" + "unsafe" +) + +type iface struct { + typ unsafe.Pointer + ptr unsafe.Pointer +} + +type slice struct { + ptr unsafe.Pointer + len int + cap int +} + +type index uintptr + +type _type struct { + ptr unsafe.Pointer +} + +func typeOf(x interface{}) _type { + return _type{ptr: ((*iface)(unsafe.Pointer(&x))).typ} +} + +func elemTypeOf(x interface{}) _type { + return makeType(reflect.TypeOf(x).Elem()) +} + +func makeType(t reflect.Type) _type { + return _type{ptr: ((*iface)(unsafe.Pointer(&t))).ptr} +} + +type value struct { + ptr unsafe.Pointer +} + +func nonAddressableValueOf(x interface{}) value { + return valueOf(x) +} + +func valueOf(x interface{}) value { + return value{ptr: ((*iface)(unsafe.Pointer(&x))).ptr} +} + +func makeValue(t reflect.Type) value { + return value{ptr: unsafe.Pointer(reflect.New(t).Pointer())} +} + +func (v value) bool() bool { return *(*bool)(v.ptr) } + +func (v value) int8() int8 { return *(*int8)(v.ptr) } + +func (v value) int16() int16 { return *(*int16)(v.ptr) } + +func (v value) int32() int32 { return *(*int32)(v.ptr) } + +func (v value) int64() int64 { return *(*int64)(v.ptr) } + +func (v value) float64() float64 { return *(*float64)(v.ptr) } + +func (v value) string() string { return *(*string)(v.ptr) } + +func (v value) bytes() []byte { return *(*[]byte)(v.ptr) } + +func (v value) iface(t reflect.Type) interface{} { + return *(*interface{})(unsafe.Pointer(&iface{ + typ: ((*iface)(unsafe.Pointer(&t))).ptr, + ptr: v.ptr, + })) +} + +func (v value) array(t reflect.Type) array { + return array{ + size: uintptr(t.Size()), + elem: ((*slice)(v.ptr)).ptr, + len: ((*slice)(v.ptr)).len, + } +} + +func (v value) setBool(b bool) { *(*bool)(v.ptr) = b } + +func (v value) setInt8(i int8) { *(*int8)(v.ptr) = i } + +func (v value) setInt16(i int16) { *(*int16)(v.ptr) = i } + +func (v value) setInt32(i int32) { *(*int32)(v.ptr) = i } + +func (v value) setInt64(i int64) { *(*int64)(v.ptr) = i } + +func (v value) setFloat64(f float64) { *(*float64)(v.ptr) = f } + +func (v value) setString(s string) { *(*string)(v.ptr) = s } + +func (v value) setBytes(b []byte) { *(*[]byte)(v.ptr) = b } + +func (v value) setArray(a array) { *(*slice)(v.ptr) = slice{ptr: a.elem, len: a.len, cap: a.len} } + +func (v value) fieldByIndex(i index) value { + return value{ptr: unsafe.Pointer(uintptr(v.ptr) + uintptr(i))} +} + +type array struct { + elem unsafe.Pointer + size uintptr + len int +} + +var ( + emptyArray struct{} +) + +func makeArray(t reflect.Type, n int) array { + var elem unsafe.Pointer + var size = uintptr(t.Size()) + if n == 0 { + elem = unsafe.Pointer(&emptyArray) + } else { + elem = unsafe_NewArray(((*iface)(unsafe.Pointer(&t))).ptr, n) + } + return array{elem: elem, size: size, len: n} +} + +func (a array) index(i int) value { + return value{ptr: unsafe.Pointer(uintptr(a.elem) + (uintptr(i) * a.size))} +} + +func (a array) length() int { return a.len } + +func (a array) isNil() bool { return a.elem == nil } + +func indexOf(s reflect.StructField) index { return index(s.Offset) } + +func bytesToString(b []byte) string { return *(*string)(unsafe.Pointer(&b)) } + +//go:linkname unsafe_NewArray reflect.unsafe_NewArray +func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer diff --git a/vendor/github.com/segmentio/kafka-go/protocol/request.go 
b/vendor/github.com/segmentio/kafka-go/protocol/request.go new file mode 100644 index 00000000000..8b99e053711 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/request.go @@ -0,0 +1,128 @@ +package protocol + +import ( + "fmt" + "io" +) + +func ReadRequest(r io.Reader) (apiVersion int16, correlationID int32, clientID string, msg Message, err error) { + d := &decoder{reader: r, remain: 4} + size := d.readInt32() + + if err = d.err; err != nil { + err = dontExpectEOF(err) + return + } + + d.remain = int(size) + apiKey := ApiKey(d.readInt16()) + apiVersion = d.readInt16() + correlationID = d.readInt32() + clientID = d.readString() + + if i := int(apiKey); i < 0 || i >= len(apiTypes) { + err = fmt.Errorf("unsupported api key: %d", i) + return + } + + if err = d.err; err != nil { + err = dontExpectEOF(err) + return + } + + t := &apiTypes[apiKey] + if t == nil { + err = fmt.Errorf("unsupported api: %s", apiNames[apiKey]) + return + } + + minVersion := t.minVersion() + maxVersion := t.maxVersion() + + if apiVersion < minVersion || apiVersion > maxVersion { + err = fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion) + return + } + + req := &t.requests[apiVersion-minVersion] + + if req.flexible { + // In the flexible case, there's a tag buffer at the end of the request header + taggedCount := int(d.readUnsignedVarInt()) + for i := 0; i < taggedCount; i++ { + d.readUnsignedVarInt() // tagID + size := d.readUnsignedVarInt() + + // Just throw away the values for now + d.read(int(size)) + } + } + + msg = req.new() + req.decode(d, valueOf(msg)) + d.discardAll() + + if err = d.err; err != nil { + err = dontExpectEOF(err) + } + + return +} + +func WriteRequest(w io.Writer, apiVersion int16, correlationID int32, clientID string, msg Message) error { + apiKey := msg.ApiKey() + + if i := int(apiKey); i < 0 || i >= len(apiTypes) { + return fmt.Errorf("unsupported api key: %d", i) + } + + t := &apiTypes[apiKey] + if t == nil { + return fmt.Errorf("unsupported api: %s", apiNames[apiKey]) + } + + minVersion := t.minVersion() + maxVersion := t.maxVersion() + + if apiVersion < minVersion || apiVersion > maxVersion { + return fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion) + } + + r := &t.requests[apiVersion-minVersion] + v := valueOf(msg) + b := newPageBuffer() + defer b.unref() + + e := &encoder{writer: b} + e.writeInt32(0) // placeholder for the request size + e.writeInt16(int16(apiKey)) + e.writeInt16(apiVersion) + e.writeInt32(correlationID) + + if r.flexible { + // Flexible messages use a nullable string for the client ID, then extra space for a + // tag buffer, which begins with a size value. Since we're not writing any fields into the + // latter, we can just write zero for now. + // + // See + // https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields + // for details. + e.writeNullString(clientID) + e.writeUnsignedVarInt(0) + } else { + // Technically, recent versions of kafka interpret this field as a nullable + // string, however kafka 0.10 expected a non-nullable string and fails with + // a NullPointerException when it receives a null client id. 
+		e.writeString(clientID)
+	}
+	r.encode(e, v)
+	err := e.err
+
+	if err == nil {
+		size := packUint32(uint32(b.Size()) - 4)
+		b.WriteAt(size[:], 0)
+		_, err = b.WriteTo(w)
+	}
+
+	return err
+}
diff --git a/vendor/github.com/segmentio/kafka-go/protocol/response.go b/vendor/github.com/segmentio/kafka-go/protocol/response.go
new file mode 100644
index 00000000000..6194803133c
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/protocol/response.go
@@ -0,0 +1,151 @@
+package protocol
+
+import (
+	"crypto/tls"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+)
+
+func ReadResponse(r io.Reader, apiKey ApiKey, apiVersion int16) (correlationID int32, msg Message, err error) {
+	if i := int(apiKey); i < 0 || i >= len(apiTypes) {
+		err = fmt.Errorf("unsupported api key: %d", i)
+		return
+	}
+
+	t := &apiTypes[apiKey]
+	if t == nil {
+		err = fmt.Errorf("unsupported api: %s", apiNames[apiKey])
+		return
+	}
+
+	minVersion := t.minVersion()
+	maxVersion := t.maxVersion()
+
+	if apiVersion < minVersion || apiVersion > maxVersion {
+		err = fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion)
+		return
+	}
+
+	d := &decoder{reader: r, remain: 4}
+	size := d.readInt32()
+
+	if err = d.err; err != nil {
+		err = dontExpectEOF(err)
+		return
+	}
+
+	d.remain = int(size)
+	correlationID = d.readInt32()
+	if err = d.err; err != nil {
+		if errors.Is(err, io.ErrUnexpectedEOF) {
+			// If a Writer/Reader is configured without TLS and connects
+			// to a broker expecting TLS the only message we return to the
+			// caller is io.ErrUnexpectedEOF which is opaque. This section
+			// tries to determine if that's what has happened.
+			// We first deconstruct the initial 4 bytes of the message
+			// from the size which was read earlier.
+			// Next, we examine those bytes to see if they look like a TLS
+			// error message. If they do we wrap the io.ErrUnexpectedEOF
+			// with some context.
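+			// (A TLS alert record starts with byte 0x15 followed by the
+			// protocol version, so when read as plaintext those first bytes
+			// parse as an implausible response size; see
+			// looksLikeUnexpectedTLS below.)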
+			if looksLikeUnexpectedTLS(size) {
+				err = fmt.Errorf("%w: broker appears to be expecting TLS", io.ErrUnexpectedEOF)
+			}
+			return
+		}
+		err = dontExpectEOF(err)
+		return
+	}
+
+	res := &t.responses[apiVersion-minVersion]
+
+	if res.flexible {
+		// In the flexible case, there's a tag buffer at the end of the response header
+		taggedCount := int(d.readUnsignedVarInt())
+		for i := 0; i < taggedCount; i++ {
+			d.readUnsignedVarInt() // tagID
+			size := d.readUnsignedVarInt()
+
+			// Just throw away the values for now
+			d.read(int(size))
+		}
+	}
+
+	msg = res.new()
+	res.decode(d, valueOf(msg))
+	d.discardAll()
+
+	if err = d.err; err != nil {
+		err = dontExpectEOF(err)
+	}
+
+	return
+}
+
+func WriteResponse(w io.Writer, apiVersion int16, correlationID int32, msg Message) error {
+	apiKey := msg.ApiKey()
+
+	if i := int(apiKey); i < 0 || i >= len(apiTypes) {
+		return fmt.Errorf("unsupported api key: %d", i)
+	}
+
+	t := &apiTypes[apiKey]
+	if t == nil {
+		return fmt.Errorf("unsupported api: %s", apiNames[apiKey])
+	}
+
+	minVersion := t.minVersion()
+	maxVersion := t.maxVersion()
+
+	if apiVersion < minVersion || apiVersion > maxVersion {
+		return fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion)
+	}
+
+	r := &t.responses[apiVersion-minVersion]
+	v := valueOf(msg)
+	b := newPageBuffer()
+	defer b.unref()
+
+	e := &encoder{writer: b}
+	e.writeInt32(0) // placeholder for the response size
+	e.writeInt32(correlationID)
+	if r.flexible {
+		// Flexible messages use extra space for a tag buffer,
+		// which begins with a size value. Since we're not writing any fields into the
+		// latter, we can just write zero for now.
+		//
+		// See
+		// https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields
+		// for details.
+		e.writeUnsignedVarInt(0)
+	}
+	r.encode(e, v)
+	err := e.err
+
+	if err == nil {
+		size := packUint32(uint32(b.Size()) - 4)
+		b.WriteAt(size[:], 0)
+		_, err = b.WriteTo(w)
+	}
+
+	return err
+}
+
+const (
+	tlsAlertByte byte = 0x15
+)
+
+// looksLikeUnexpectedTLS returns true if the size passed in resembles
+// the TLS alert message that is returned to a client which sends
+// an invalid ClientHello message.
+func looksLikeUnexpectedTLS(size int32) bool {
+	var sizeBytes [4]byte
+	binary.BigEndian.PutUint32(sizeBytes[:], uint32(size))
+
+	if sizeBytes[0] != tlsAlertByte {
+		return false
+	}
+	version := int(sizeBytes[1])<<8 | int(sizeBytes[2])
+	return version <= tls.VersionTLS13 && version >= tls.VersionTLS10
+}
diff --git a/vendor/github.com/segmentio/kafka-go/protocol/roundtrip.go b/vendor/github.com/segmentio/kafka-go/protocol/roundtrip.go
new file mode 100644
index 00000000000..c23532ca75c
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/protocol/roundtrip.go
@@ -0,0 +1,28 @@
+package protocol
+
+import (
+	"io"
+)
+
+// RoundTrip sends a request to a kafka broker and returns the response.
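+//
+// A minimal sketch (conn, v and req are assumptions; in practice conn comes
+// from net.Dial and req is one of the Message types registered by the
+// sub-packages of this package):
+//
+//	res, err := RoundTrip(conn, v, 1, "example-client", req)
+//	if err != nil {
+//		// transport error or correlation id mismatch
+//	}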
+func RoundTrip(rw io.ReadWriter, apiVersion int16, correlationID int32, clientID string, req Message) (Message, error) { + if err := WriteRequest(rw, apiVersion, correlationID, clientID, req); err != nil { + return nil, err + } + if !hasResponse(req) { + return nil, nil + } + id, res, err := ReadResponse(rw, req.ApiKey(), apiVersion) + if err != nil { + return nil, err + } + if id != correlationID { + return nil, Errorf("correlation id mismatch (expected=%d, found=%d)", correlationID, id) + } + return res, nil +} + +func hasResponse(msg Message) bool { + x, _ := msg.(interface{ HasResponse() bool }) + return x == nil || x.HasResponse() +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/saslauthenticate/saslauthenticate.go b/vendor/github.com/segmentio/kafka-go/protocol/saslauthenticate/saslauthenticate.go new file mode 100644 index 00000000000..fdd7bfbcda5 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/saslauthenticate/saslauthenticate.go @@ -0,0 +1,66 @@ +package saslauthenticate + +import ( + "encoding/binary" + "io" + + "github.com/segmentio/kafka-go/protocol" +) + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + AuthBytes []byte `kafka:"min=v0,max=v1"` +} + +func (r *Request) RawExchange(rw io.ReadWriter) (protocol.Message, error) { + if err := r.writeTo(rw); err != nil { + return nil, err + } + return r.readResp(rw) +} + +func (*Request) Required(versions map[protocol.ApiKey]int16) bool { + const v0 = 0 + return versions[protocol.SaslHandshake] == v0 +} + +func (r *Request) writeTo(w io.Writer) error { + size := len(r.AuthBytes) + 4 + buf := make([]byte, size) + binary.BigEndian.PutUint32(buf[:4], uint32(len(r.AuthBytes))) + copy(buf[4:], r.AuthBytes) + _, err := w.Write(buf) + return err +} + +func (r *Request) readResp(read io.Reader) (protocol.Message, error) { + var lenBuf [4]byte + if _, err := io.ReadFull(read, lenBuf[:]); err != nil { + return nil, err + } + respLen := int32(binary.BigEndian.Uint32(lenBuf[:])) + data := make([]byte, respLen) + + if _, err := io.ReadFull(read, data[:]); err != nil { + return nil, err + } + return &Response{ + AuthBytes: data, + }, nil +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.SaslAuthenticate } + +type Response struct { + ErrorCode int16 `kafka:"min=v0,max=v1"` + ErrorMessage string `kafka:"min=v0,max=v1,nullable"` + AuthBytes []byte `kafka:"min=v0,max=v1"` + SessionLifetimeMs int64 `kafka:"min=v1,max=v1"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.SaslAuthenticate } + +var _ protocol.RawExchanger = (*Request)(nil) diff --git a/vendor/github.com/segmentio/kafka-go/protocol/saslhandshake/saslhandshake.go b/vendor/github.com/segmentio/kafka-go/protocol/saslhandshake/saslhandshake.go new file mode 100644 index 00000000000..aa72e8309a9 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/saslhandshake/saslhandshake.go @@ -0,0 +1,20 @@ +package saslhandshake + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + Mechanism string `kafka:"min=v0,max=v1"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.SaslHandshake } + +type Response struct { + ErrorCode int16 `kafka:"min=v0,max=v1"` + Mechanisms []string `kafka:"min=v0,max=v1"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.SaslHandshake } diff --git 
a/vendor/github.com/segmentio/kafka-go/protocol/size.go b/vendor/github.com/segmentio/kafka-go/protocol/size.go new file mode 100644 index 00000000000..f487dfc5dfa --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/size.go @@ -0,0 +1,33 @@ +package protocol + +import ( + "math/bits" +) + +func sizeOfVarString(s string) int { + return sizeOfVarInt(int64(len(s))) + len(s) +} + +func sizeOfVarNullBytes(b []byte) int { + if b == nil { + return sizeOfVarInt(-1) + } + n := len(b) + return sizeOfVarInt(int64(n)) + n +} + +func sizeOfVarNullBytesIface(b Bytes) int { + if b == nil { + return sizeOfVarInt(-1) + } + n := b.Len() + return sizeOfVarInt(int64(n)) + n +} + +func sizeOfVarInt(i int64) int { + return sizeOfUnsignedVarInt(uint64((i << 1) ^ (i >> 63))) // zig-zag encoding +} + +func sizeOfUnsignedVarInt(i uint64) int { + return (bits.Len64(i|1) + 6) / 7 +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol/syncgroup/syncgroup.go b/vendor/github.com/segmentio/kafka-go/protocol/syncgroup/syncgroup.go new file mode 100644 index 00000000000..e1ced061549 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/syncgroup/syncgroup.go @@ -0,0 +1,50 @@ +package syncgroup + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v4,max=v5,tag"` + + GroupID string `kafka:"min=v0,max=v3|min=v4,max=v5,compact"` + GenerationID int32 `kafka:"min=v0,max=v5|min=v4,max=v5,compact"` + MemberID string `kafka:"min=v0,max=v3|min=v4,max=v5,compact"` + GroupInstanceID string `kafka:"min=v3,max=v3,nullable|min=v4,max=v5,nullable,compact"` + ProtocolType string `kafka:"min=v5,max=v5"` + ProtocolName string `kafka:"min=v5,max=v5"` + Assignments []RequestAssignment `kafka:"min=v0,max=v5"` +} + +type RequestAssignment struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v4,max=v5,tag"` + + MemberID string `kafka:"min=v0,max=v3|min=v4,max=v5,compact"` + Assignment []byte `kafka:"min=v0,max=v3|min=v4,max=v5,compact"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.SyncGroup } + +func (r *Request) Group() string { return r.GroupID } + +var _ protocol.GroupMessage = (*Request)(nil) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
+ _ struct{} `kafka:"min=v4,max=v5,tag"` + + ThrottleTimeMS int32 `kafka:"min=v1,max=v5"` + ErrorCode int16 `kafka:"min=v0,max=v5"` + ProtocolType string `kafka:"min=v5,max=v5"` + ProtocolName string `kafka:"min=v5,max=v5"` + Assignments []byte `kafka:"min=v0,max=v3|min=v4,max=v5,compact"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.SyncGroup } diff --git a/vendor/github.com/segmentio/kafka-go/protocol/txnoffsetcommit/txnoffsetcommit.go b/vendor/github.com/segmentio/kafka-go/protocol/txnoffsetcommit/txnoffsetcommit.go new file mode 100644 index 00000000000..85f3f05e31a --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/protocol/txnoffsetcommit/txnoffsetcommit.go @@ -0,0 +1,77 @@ +package txnoffsetcommit + +import "github.com/segmentio/kafka-go/protocol" + +func init() { + protocol.Register(&Request{}, &Response{}) +} + +type Request struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + TransactionalID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + GroupID string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + ProducerID int64 `kafka:"min=v0,max=v3"` + ProducerEpoch int16 `kafka:"min=v0,max=v3"` + GenerationID int32 `kafka:"min=v3,max=v3"` + MemberID string `kafka:"min=v3,max=v3,compact"` + GroupInstanceID string `kafka:"min=v3,max=v3,compact,nullable"` + Topics []RequestTopic `kafka:"min=v0,max=v3"` +} + +type RequestTopic struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + Name string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + Partitions []RequestPartition `kafka:"min=v0,max=v3"` +} + +type RequestPartition struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + Partition int32 `kafka:"min=v0,max=v3"` + CommittedOffset int64 `kafka:"min=v0,max=v3"` + CommittedLeaderEpoch int32 `kafka:"min=v2,max=v3"` + CommittedMetadata string `kafka:"min=v0,max=v2|min=v3,max=v3,nullable,compact"` +} + +func (r *Request) ApiKey() protocol.ApiKey { return protocol.TxnOffsetCommit } + +func (r *Request) Group() string { return r.GroupID } + +var _ protocol.GroupMessage = (*Request)(nil) + +type Response struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + ThrottleTimeMs int32 `kafka:"min=v0,max=v3"` + Topics []ResponseTopic `kafka:"min=v0,max=v3"` +} + +type ResponseTopic struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. + _ struct{} `kafka:"min=v3,max=v3,tag"` + + Name string `kafka:"min=v0,max=v2|min=v3,max=v3,compact"` + Partitions []ResponsePartition `kafka:"min=v0,max=v3"` +} + +type ResponsePartition struct { + // We need at least one tagged field to indicate that this is a "flexible" message + // type. 
+ _ struct{} `kafka:"min=v3,max=v3,tag"` + + Partition int32 `kafka:"min=v0,max=v3"` + ErrorCode int16 `kafka:"min=v0,max=v3"` +} + +func (r *Response) ApiKey() protocol.ApiKey { return protocol.TxnOffsetCommit } diff --git a/vendor/github.com/segmentio/kafka-go/read.go b/vendor/github.com/segmentio/kafka-go/read.go new file mode 100644 index 00000000000..ec2b38527ff --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/read.go @@ -0,0 +1,562 @@ +package kafka + +import ( + "bufio" + "errors" + "fmt" + "io" + "reflect" +) + +var errShortRead = errors.New("not enough bytes available to load the response") + +func peekRead(r *bufio.Reader, sz int, n int, f func([]byte)) (int, error) { + if n > sz { + return sz, errShortRead + } + b, err := r.Peek(n) + if err != nil { + return sz, err + } + f(b) + return discardN(r, sz, n) +} + +func readInt8(r *bufio.Reader, sz int, v *int8) (int, error) { + return peekRead(r, sz, 1, func(b []byte) { *v = makeInt8(b) }) +} + +func readInt16(r *bufio.Reader, sz int, v *int16) (int, error) { + return peekRead(r, sz, 2, func(b []byte) { *v = makeInt16(b) }) +} + +func readInt32(r *bufio.Reader, sz int, v *int32) (int, error) { + return peekRead(r, sz, 4, func(b []byte) { *v = makeInt32(b) }) +} + +func readInt64(r *bufio.Reader, sz int, v *int64) (int, error) { + return peekRead(r, sz, 8, func(b []byte) { *v = makeInt64(b) }) +} + +func readVarInt(r *bufio.Reader, sz int, v *int64) (remain int, err error) { + // Optimistically assume that most of the time, there will be data buffered + // in the reader. If this is not the case, the buffer will be refilled after + // consuming zero bytes from the input. + input, _ := r.Peek(r.Buffered()) + x := uint64(0) + s := uint(0) + + for { + if len(input) > sz { + input = input[:sz] + } + + for i, b := range input { + if b < 0x80 { + x |= uint64(b) << s + *v = int64(x>>1) ^ -(int64(x) & 1) + n, err := r.Discard(i + 1) + return sz - n, err + } + + x |= uint64(b&0x7f) << s + s += 7 + } + + // Make room in the input buffer to load more data from the underlying + // stream. The x and s variables are left untouched, ensuring that the + // varint decoding can continue on the next loop iteration. + n, _ := r.Discard(len(input)) + sz -= n + if sz == 0 { + return 0, errShortRead + } + + // Fill the buffer: ask for one more byte, but in practice the reader + // will load way more from the underlying stream. + if _, err := r.Peek(1); err != nil { + if errors.Is(err, io.EOF) { + err = errShortRead + } + return sz, err + } + + // Grab as many bytes as possible from the buffer, then go on to the + // next loop iteration which is going to consume it. 
+ input, _ = r.Peek(r.Buffered()) + } +} + +func readBool(r *bufio.Reader, sz int, v *bool) (int, error) { + return peekRead(r, sz, 1, func(b []byte) { *v = b[0] != 0 }) +} + +func readString(r *bufio.Reader, sz int, v *string) (int, error) { + return readStringWith(r, sz, func(r *bufio.Reader, sz int, n int) (remain int, err error) { + *v, remain, err = readNewString(r, sz, n) + return + }) +} + +func readStringWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int, int) (int, error)) (int, error) { + var err error + var len int16 + + if sz, err = readInt16(r, sz, &len); err != nil { + return sz, err + } + + n := int(len) + if n > sz { + return sz, errShortRead + } + + return cb(r, sz, n) +} + +func readNewString(r *bufio.Reader, sz int, n int) (string, int, error) { + b, sz, err := readNewBytes(r, sz, n) + return string(b), sz, err +} + +func readBytes(r *bufio.Reader, sz int, v *[]byte) (int, error) { + return readBytesWith(r, sz, func(r *bufio.Reader, sz int, n int) (remain int, err error) { + *v, remain, err = readNewBytes(r, sz, n) + return + }) +} + +func readBytesWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int, int) (int, error)) (int, error) { + var err error + var n int + + if sz, err = readArrayLen(r, sz, &n); err != nil { + return sz, err + } + + if n > sz { + return sz, errShortRead + } + + return cb(r, sz, n) +} + +func readNewBytes(r *bufio.Reader, sz int, n int) ([]byte, int, error) { + var err error + var b []byte + var shortRead bool + + if n > 0 { + if sz < n { + n = sz + shortRead = true + } + + b = make([]byte, n) + n, err = io.ReadFull(r, b) + b = b[:n] + sz -= n + + if err == nil && shortRead { + err = errShortRead + } + } + + return b, sz, err +} + +func readArrayLen(r *bufio.Reader, sz int, n *int) (int, error) { + var err error + var len int32 + if sz, err = readInt32(r, sz, &len); err != nil { + return sz, err + } + *n = int(len) + return sz, nil +} + +func readArrayWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int) (int, error)) (int, error) { + var err error + var len int32 + + if sz, err = readInt32(r, sz, &len); err != nil { + return sz, err + } + + for n := int(len); n > 0; n-- { + if sz, err = cb(r, sz); err != nil { + break + } + } + + return sz, err +} + +func readStringArray(r *bufio.Reader, sz int, v *[]string) (remain int, err error) { + var content []string + fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { + var value string + if fnRemain, fnErr = readString(r, size, &value); fnErr != nil { + return + } + content = append(content, value) + return + } + if remain, err = readArrayWith(r, sz, fn); err != nil { + return + } + + *v = content + return +} + +func readMapStringInt32(r *bufio.Reader, sz int, v *map[string][]int32) (remain int, err error) { + var len int32 + if remain, err = readInt32(r, sz, &len); err != nil { + return + } + + content := make(map[string][]int32, len) + for i := 0; i < int(len); i++ { + var key string + var values []int32 + + if remain, err = readString(r, remain, &key); err != nil { + return + } + + fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { + var value int32 + if fnRemain, fnErr = readInt32(r, size, &value); fnErr != nil { + return + } + values = append(values, value) + return + } + if remain, err = readArrayWith(r, remain, fn); err != nil { + return + } + + content[key] = values + } + *v = content + + return +} + +func read(r *bufio.Reader, sz int, a interface{}) (int, error) { + switch v := a.(type) { + case *int8: + return readInt8(r, sz, v) + case *int16: + 
return readInt16(r, sz, v)
+	case *int32:
+		return readInt32(r, sz, v)
+	case *int64:
+		return readInt64(r, sz, v)
+	case *bool:
+		return readBool(r, sz, v)
+	case *string:
+		return readString(r, sz, v)
+	case *[]byte:
+		return readBytes(r, sz, v)
+	}
+	switch v := reflect.ValueOf(a).Elem(); v.Kind() {
+	case reflect.Struct:
+		return readStruct(r, sz, v)
+	case reflect.Slice:
+		return readSlice(r, sz, v)
+	default:
+		panic(fmt.Sprintf("unsupported type: %T", a))
+	}
+}
+
+func readStruct(r *bufio.Reader, sz int, v reflect.Value) (int, error) {
+	var err error
+	for i, n := 0, v.NumField(); i != n; i++ {
+		if sz, err = read(r, sz, v.Field(i).Addr().Interface()); err != nil {
+			return sz, err
+		}
+	}
+	return sz, nil
+}
+
+func readSlice(r *bufio.Reader, sz int, v reflect.Value) (int, error) {
+	var err error
+	var len int32
+
+	if sz, err = readInt32(r, sz, &len); err != nil {
+		return sz, err
+	}
+
+	if n := int(len); n < 0 {
+		v.Set(reflect.Zero(v.Type()))
+	} else {
+		v.Set(reflect.MakeSlice(v.Type(), n, n))
+
+		for i := 0; i != n; i++ {
+			if sz, err = read(r, sz, v.Index(i).Addr().Interface()); err != nil {
+				return sz, err
+			}
+		}
+	}
+
+	return sz, nil
+}
+
+func readFetchResponseHeaderV2(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) {
+	var n int32
+	var p struct {
+		Partition           int32
+		ErrorCode           int16
+		HighwaterMarkOffset int64
+		MessageSetSize      int32
+	}
+
+	if remain, err = readInt32(r, size, &throttle); err != nil {
+		return
+	}
+
+	if remain, err = readInt32(r, remain, &n); err != nil {
+		return
+	}
+
+	// This error should never trigger, unless there's a bug in the kafka client
+	// or server.
+	if n != 1 {
+		err = fmt.Errorf("1 kafka topic was expected in the fetch response but the client received %d", n)
+		return
+	}
+
+	// We ignore the topic name because we requested messages for a single
+	// topic; unless there's a bug in the kafka server, we will have received
+	// the name of the topic that we requested.
+	if remain, err = discardString(r, remain); err != nil {
+		return
+	}
+
+	if remain, err = readInt32(r, remain, &n); err != nil {
+		return
+	}
+
+	// This error should never trigger, unless there's a bug in the kafka client
+	// or server.
+	if n != 1 {
+		err = fmt.Errorf("1 kafka partition was expected in the fetch response but the client received %d", n)
+		return
+	}
+
+	if remain, err = read(r, remain, &p); err != nil {
+		return
+	}
+
+	if p.ErrorCode != 0 {
+		err = Error(p.ErrorCode)
+		return
+	}
+
+	// This error should never trigger, unless there's a bug in the kafka client
+	// or server.
+	if remain != int(p.MessageSetSize) {
+		err = fmt.Errorf("the size of the message set in a fetch response doesn't match the number of remaining bytes (message set size = %d, remaining bytes = %d)", p.MessageSetSize, remain)
+		return
+	}
+
+	watermark = p.HighwaterMarkOffset
+	return
+}
+
+func readFetchResponseHeaderV5(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) {
+	var n int32
+	type AbortedTransaction struct {
+		ProducerId  int64
+		FirstOffset int64
+	}
+	var p struct {
+		Partition           int32
+		ErrorCode           int16
+		HighwaterMarkOffset int64
+		LastStableOffset    int64
+		LogStartOffset      int64
+	}
+	var messageSetSize int32
+	var abortedTransactions []AbortedTransaction
+
+	if remain, err = readInt32(r, size, &throttle); err != nil {
+		return
+	}
+
+	if remain, err = readInt32(r, remain, &n); err != nil {
+		return
+	}
+
+	// This error should never trigger, unless there's a bug in the kafka client
+	// or server.
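+	// (The request was built for exactly one topic, so a well-formed
+	// response carries exactly one topic entry.)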
+	if n != 1 {
+		err = fmt.Errorf("1 kafka topic was expected in the fetch response but the client received %d", n)
+		return
+	}
+
+	// We ignore the topic name because we requested messages for a single
+	// topic; unless there's a bug in the kafka server, we will have received
+	// the name of the topic that we requested.
+	if remain, err = discardString(r, remain); err != nil {
+		return
+	}
+
+	if remain, err = readInt32(r, remain, &n); err != nil {
+		return
+	}
+
+	// This error should never trigger, unless there's a bug in the kafka client
+	// or server.
+	if n != 1 {
+		err = fmt.Errorf("1 kafka partition was expected in the fetch response but the client received %d", n)
+		return
+	}
+
+	if remain, err = read(r, remain, &p); err != nil {
+		return
+	}
+
+	var abortedTransactionLen int
+	if remain, err = readArrayLen(r, remain, &abortedTransactionLen); err != nil {
+		return
+	}
+
+	if abortedTransactionLen == -1 {
+		abortedTransactions = nil
+	} else {
+		abortedTransactions = make([]AbortedTransaction, abortedTransactionLen)
+		for i := 0; i < abortedTransactionLen; i++ {
+			if remain, err = read(r, remain, &abortedTransactions[i]); err != nil {
+				return
+			}
+		}
+	}
+
+	if p.ErrorCode != 0 {
+		err = Error(p.ErrorCode)
+		return
+	}
+
+	remain, err = readInt32(r, remain, &messageSetSize)
+	if err != nil {
+		return
+	}
+
+	// This error should never trigger, unless there's a bug in the kafka client
+	// or server.
+	if remain != int(messageSetSize) {
+		err = fmt.Errorf("the size of the message set in a fetch response doesn't match the number of remaining bytes (message set size = %d, remaining bytes = %d)", messageSetSize, remain)
+		return
+	}
+
+	watermark = p.HighwaterMarkOffset
+	return
+
+}
+
+func readFetchResponseHeaderV10(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) {
+	var n int32
+	var errorCode int16
+	type AbortedTransaction struct {
+		ProducerId  int64
+		FirstOffset int64
+	}
+	var p struct {
+		Partition           int32
+		ErrorCode           int16
+		HighwaterMarkOffset int64
+		LastStableOffset    int64
+		LogStartOffset      int64
+	}
+	var messageSetSize int32
+	var abortedTransactions []AbortedTransaction
+
+	if remain, err = readInt32(r, size, &throttle); err != nil {
+		return
+	}
+
+	if remain, err = readInt16(r, remain, &errorCode); err != nil {
+		return
+	}
+	if errorCode != 0 {
+		err = Error(errorCode)
+		return
+	}
+
+	if remain, err = discardInt32(r, remain); err != nil {
+		return
+	}
+
+	if remain, err = readInt32(r, remain, &n); err != nil {
+		return
+	}
+
+	// This error should never trigger, unless there's a bug in the kafka client
+	// or server.
+	if n != 1 {
+		err = fmt.Errorf("1 kafka topic was expected in the fetch response but the client received %d", n)
+		return
+	}
+
+	// We ignore the topic name because we requested messages for a single
+	// topic; unless there's a bug in the kafka server, we will have received
+	// the name of the topic that we requested.
+	if remain, err = discardString(r, remain); err != nil {
+		return
+	}
+
+	if remain, err = readInt32(r, remain, &n); err != nil {
+		return
+	}
+
+	// This error should never trigger, unless there's a bug in the kafka client
+	// or server.
+	if n != 1 {
+		err = fmt.Errorf("1 kafka partition was expected in the fetch response but the client received %d", n)
+		return
+	}
+
+	if remain, err = read(r, remain, &p); err != nil {
+		return
+	}
+
+	var abortedTransactionLen int
+	if remain, err = readArrayLen(r, remain, &abortedTransactionLen); err != nil {
+		return
+	}
+
+	if abortedTransactionLen == -1 {
+		abortedTransactions = nil
+	} else {
+		abortedTransactions = make([]AbortedTransaction, abortedTransactionLen)
+		for i := 0; i < abortedTransactionLen; i++ {
+			if remain, err = read(r, remain, &abortedTransactions[i]); err != nil {
+				return
+			}
+		}
+	}
+
+	if p.ErrorCode != 0 {
+		err = Error(p.ErrorCode)
+		return
+	}
+
+	remain, err = readInt32(r, remain, &messageSetSize)
+	if err != nil {
+		return
+	}
+
+	// This error should never trigger, unless there's a bug in the kafka client
+	// or server.
+	if remain != int(messageSetSize) {
+		err = fmt.Errorf("the size of the message set in a fetch response doesn't match the number of remaining bytes (message set size = %d, remaining bytes = %d)", messageSetSize, remain)
+		return
+	}
+
+	watermark = p.HighwaterMarkOffset
+	return
+}
diff --git a/vendor/github.com/segmentio/kafka-go/reader.go b/vendor/github.com/segmentio/kafka-go/reader.go
new file mode 100644
index 00000000000..1acb676e9ab
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/reader.go
@@ -0,0 +1,1619 @@
+package kafka
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"sort"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+const (
+	LastOffset  int64 = -1 // The most recent offset available for a partition.
+	FirstOffset int64 = -2 // The least recent offset available for a partition.
+)
+
+const (
+	// defaultCommitRetries holds the number of commit attempts to make
+	// before giving up.
+	defaultCommitRetries = 3
+)
+
+const (
+	// defaultFetchMinBytes of 1 byte means that fetch requests are answered as
+	// soon as a single byte of data is available or the fetch request times out
+	// waiting for data to arrive.
+	defaultFetchMinBytes = 1
+)
+
+var (
+	errOnlyAvailableWithGroup = errors.New("unavailable when GroupID is not set")
+	errNotAvailableWithGroup  = errors.New("unavailable when GroupID is set")
+)
+
+const (
+	// defaultReadBackoffMin/Max set the boundaries for how long the reader
+	// waits before polling for new messages.
+	defaultReadBackoffMin = 100 * time.Millisecond
+	defaultReadBackoffMax = 1 * time.Second
+)
+
+// Reader provides a high-level API for consuming messages from kafka.
+//
+// A Reader automatically manages reconnections to a kafka server, and
+// blocking methods have context support for asynchronous cancellations.
+//
+// Note that it is important to call `Close()` on a `Reader` when a process exits.
+// The kafka server needs a graceful disconnect to stop it from continuing to
+// attempt to send messages to the connected clients. A reader will not call
+// `Close()` on its own if the process is terminated with SIGINT (ctrl-c at the
+// shell) or SIGTERM (as docker stop or a kubernetes restart does). This can
+// result in a delay when a new reader on the same topic connects (e.g. new
+// process started or new container running). Use a `signal.Notify` handler to
+// close the reader on process shutdown.
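+//
+// A minimal shutdown sketch (the broker address and topic are illustrative,
+// not defaults):
+//
+//	r := kafka.NewReader(kafka.ReaderConfig{
+//		Brokers: []string{"localhost:9092"},
+//		Topic:   "my-topic",
+//	})
+//	sigs := make(chan os.Signal, 1)
+//	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
+//	go func() {
+//		<-sigs
+//		r.Close()
+//	}()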
+type Reader struct { + // immutable fields of the reader + config ReaderConfig + + // communication channels between the parent reader and its subreaders + msgs chan readerMessage + + // mutable fields of the reader (synchronized on the mutex) + mutex sync.Mutex + join sync.WaitGroup + cancel context.CancelFunc + stop context.CancelFunc + done chan struct{} + commits chan commitRequest + version int64 // version holds the generation of the spawned readers + offset int64 + lag int64 + closed bool + + // Without a group subscription (when Reader.config.GroupID == ""), + // when errors occur, the Reader gets a synthetic readerMessage with + // a non-nil err set. With group subscriptions however, when an error + // occurs in Reader.run, there's no reader running (sic, cf. reader vs. + // Reader) and there's no way to let the high-level methods like + // FetchMessage know that an error indeed occurred. If an error in run + // occurs, it will be non-block-sent to this unbuffered channel, where + // the high-level methods can select{} on it and notify the caller. + runError chan error + + // reader stats are all made of atomic values, no need for synchronization. + once uint32 + stctx context.Context + // reader stats are all made of atomic values, no need for synchronization. + // Use a pointer to ensure 64-bit alignment of the values. + stats *readerStats +} + +// useConsumerGroup indicates whether the Reader is part of a consumer group. +func (r *Reader) useConsumerGroup() bool { return r.config.GroupID != "" } + +func (r *Reader) getTopics() []string { + if len(r.config.GroupTopics) > 0 { + return r.config.GroupTopics[:] + } + + return []string{r.config.Topic} +} + +// useSyncCommits indicates whether the Reader is configured to perform sync or +// async commits. +func (r *Reader) useSyncCommits() bool { return r.config.CommitInterval == 0 } + +func (r *Reader) unsubscribe() { + r.cancel() + r.join.Wait() + // it would be interesting to drain the r.msgs channel at this point since + // it will contain buffered messages for partitions that may not be + // re-assigned to this reader in the next consumer group generation. + // however, draining the channel could race with the client calling + // ReadMessage, which could result in messages delivered and/or committed + // with gaps in the offset. for now, we will err on the side of caution and + // potentially have those messages be reprocessed in the next generation by + // another consumer to avoid such a race. +} + +func (r *Reader) subscribe(allAssignments map[string][]PartitionAssignment) { + offsets := make(map[topicPartition]int64) + for topic, assignments := range allAssignments { + for _, assignment := range assignments { + key := topicPartition{ + topic: topic, + partition: int32(assignment.ID), + } + offsets[key] = assignment.Offset + } + } + + r.mutex.Lock() + r.start(offsets) + r.mutex.Unlock() + + r.withLogger(func(l Logger) { + l.Printf("subscribed to topics and partitions: %+v", offsets) + }) +} + +// commitOffsetsWithRetry attempts to commit the specified offsets and retries +// up to the specified number of times. 
+func (r *Reader) commitOffsetsWithRetry(gen *Generation, offsetStash offsetStash, retries int) (err error) { + const ( + backoffDelayMin = 100 * time.Millisecond + backoffDelayMax = 5 * time.Second + ) + + for attempt := 0; attempt < retries; attempt++ { + if attempt != 0 { + if !sleep(r.stctx, backoff(attempt, backoffDelayMin, backoffDelayMax)) { + return + } + } + + if err = gen.CommitOffsets(offsetStash); err == nil { + return + } + } + + return // err will not be nil +} + +// offsetStash holds offsets by topic => partition => offset. +type offsetStash map[string]map[int]int64 + +// merge updates the offsetStash with the offsets from the provided messages. +func (o offsetStash) merge(commits []commit) { + for _, c := range commits { + offsetsByPartition, ok := o[c.topic] + if !ok { + offsetsByPartition = map[int]int64{} + o[c.topic] = offsetsByPartition + } + + if offset, ok := offsetsByPartition[c.partition]; !ok || c.offset > offset { + offsetsByPartition[c.partition] = c.offset + } + } +} + +// reset clears the contents of the offsetStash. +func (o offsetStash) reset() { + for key := range o { + delete(o, key) + } +} + +// commitLoopImmediate handles each commit synchronously. +func (r *Reader) commitLoopImmediate(ctx context.Context, gen *Generation) { + offsets := offsetStash{} + + for { + select { + case <-ctx.Done(): + // drain the commit channel and prepare a single, final commit. + // the commit will combine any outstanding requests and the result + // will be sent back to all the callers of CommitMessages so that + // they can return. + var errchs []chan<- error + for hasCommits := true; hasCommits; { + select { + case req := <-r.commits: + offsets.merge(req.commits) + errchs = append(errchs, req.errch) + default: + hasCommits = false + } + } + err := r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries) + for _, errch := range errchs { + // NOTE : this will be a buffered channel and will not block. + errch <- err + } + return + + case req := <-r.commits: + offsets.merge(req.commits) + req.errch <- r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries) + offsets.reset() + } + } +} + +// commitLoopInterval handles each commit asynchronously with a period defined +// by ReaderConfig.CommitInterval. +func (r *Reader) commitLoopInterval(ctx context.Context, gen *Generation) { + ticker := time.NewTicker(r.config.CommitInterval) + defer ticker.Stop() + + // the offset stash should not survive rebalances b/c the consumer may + // receive new assignments. + offsets := offsetStash{} + + commit := func() { + if err := r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries); err != nil { + r.withErrorLogger(func(l Logger) { l.Printf("%v", err) }) + } else { + offsets.reset() + } + } + + for { + select { + case <-ctx.Done(): + // drain the commit channel in order to prepare the final commit. + for hasCommits := true; hasCommits; { + select { + case req := <-r.commits: + offsets.merge(req.commits) + default: + hasCommits = false + } + } + commit() + return + + case <-ticker.C: + commit() + + case req := <-r.commits: + offsets.merge(req.commits) + } + } +} + +// commitLoop processes commits off the commit chan. 
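+// Depending on ReaderConfig.CommitInterval it dispatches either to the
+// synchronous loop, where each commit request is written to the broker before
+// the caller is unblocked, or to the ticker-based loop that batches commits.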
+func (r *Reader) commitLoop(ctx context.Context, gen *Generation) {
+	r.withLogger(func(l Logger) {
+		l.Printf("started commit for group %s\n", r.config.GroupID)
+	})
+	defer r.withLogger(func(l Logger) {
+		l.Printf("stopped commit for group %s\n", r.config.GroupID)
+	})
+
+	if r.useSyncCommits() {
+		r.commitLoopImmediate(ctx, gen)
+	} else {
+		r.commitLoopInterval(ctx, gen)
+	}
+}
+
+// run provides the main consumer group management loop. Each iteration performs the
+// handshake to join the Reader to the consumer group.
+//
+// This function is responsible for closing the consumer group upon exit.
+func (r *Reader) run(cg *ConsumerGroup) {
+	defer close(r.done)
+	defer cg.Close()
+
+	r.withLogger(func(l Logger) {
+		l.Printf("entering loop for consumer group, %v\n", r.config.GroupID)
+	})
+
+	for {
+		// Limit the number of attempts at waiting for the next
+		// consumer generation.
+		var err error
+		var gen *Generation
+		for attempt := 1; attempt <= r.config.MaxAttempts; attempt++ {
+			gen, err = cg.Next(r.stctx)
+			if err == nil {
+				break
+			}
+			if errors.Is(err, r.stctx.Err()) {
+				return
+			}
+			r.stats.errors.observe(1)
+			r.withErrorLogger(func(l Logger) {
+				l.Printf("%v", err)
+			})
+			// Continue with next attempt...
+		}
+		if err != nil {
+			// All attempts have failed.
+			select {
+			case r.runError <- err:
+				// If somebody's receiving on the runError, let
+				// them know the error occurred.
+			default:
+				// Otherwise, don't block to allow healing.
+			}
+			continue
+		}
+
+		r.stats.rebalances.observe(1)
+
+		r.subscribe(gen.Assignments)
+
+		gen.Start(func(ctx context.Context) {
+			r.commitLoop(ctx, gen)
+		})
+		gen.Start(func(ctx context.Context) {
+			// wait for the generation to end and then unsubscribe.
+			select {
+			case <-ctx.Done():
+				// continue to next generation
+			case <-r.stctx.Done():
+				// this will be the last loop because the reader is closed.
+			}
+			r.unsubscribe()
+		})
+	}
+}
+
+// ReaderConfig is a configuration object used to create new instances of
+// Reader.
+type ReaderConfig struct {
+	// The list of broker addresses used to connect to the kafka cluster.
+	Brokers []string
+
+	// GroupID holds the optional consumer group id. If GroupID is specified,
+	// then Partition should NOT be specified (i.e. it should be left at 0).
+	GroupID string
+
+	// GroupTopics allows specifying multiple topics, but can only be used in
+	// combination with GroupID, as it is a consumer-group feature. As such, if
+	// GroupID is set, then either Topic or GroupTopics must be defined.
+	GroupTopics []string
+
+	// The topic to read messages from.
+	Topic string
+
+	// Partition to read messages from. Either Partition or GroupID may
+	// be assigned, but not both.
+	Partition int
+
+	// A dialer used to open connections to the kafka server. This field is
+	// optional; if nil, the default dialer is used instead.
+	Dialer *Dialer
+
+	// The capacity of the internal message queue, defaults to 100 if none is
+	// set.
+	QueueCapacity int
+
+	// MinBytes indicates to the broker the minimum batch size that the consumer
+	// will accept. Setting a high minimum when consuming from a low-volume topic
+	// may result in delayed delivery when the broker does not have enough data to
+	// satisfy the defined minimum.
+	//
+	// Default: 1
+	MinBytes int
+
+	// MaxBytes indicates to the broker the maximum batch size that the consumer
+	// will accept. The broker will truncate a message to satisfy this maximum, so
+	// choose a value that is high enough for your largest message size.
+	//
+	// Default: 1MB
+	MaxBytes int
+
+	// Maximum amount of time to wait for new data to come when fetching batches
+	// of messages from kafka.
+	//
+	// Default: 10s
+	MaxWait time.Duration
+
+	// ReadBatchTimeout is the amount of time to wait to fetch a message from a
+	// kafka batch.
+	//
+	// Default: 10s
+	ReadBatchTimeout time.Duration
+
+	// ReadLagInterval sets the frequency at which the reader lag is updated.
+	// Setting this field to a negative value disables lag reporting.
+	ReadLagInterval time.Duration
+
+	// GroupBalancers is the priority-ordered list of client-side consumer group
+	// balancing strategies that will be offered to the coordinator. The first
+	// strategy that all group members support will be chosen by the leader.
+	//
+	// Default: [Range, RoundRobin]
+	//
+	// Only used when GroupID is set
+	GroupBalancers []GroupBalancer
+
+	// HeartbeatInterval sets the optional frequency at which the reader sends the consumer
+	// group heartbeat update.
+	//
+	// Default: 3s
+	//
+	// Only used when GroupID is set
+	HeartbeatInterval time.Duration
+
+	// CommitInterval indicates the interval at which offsets are committed to
+	// the broker. If 0, commits will be handled synchronously.
+	//
+	// Default: 0
+	//
+	// Only used when GroupID is set
+	CommitInterval time.Duration
+
+	// PartitionWatchInterval indicates how often a reader checks for partition changes.
+	// If a reader sees a partition change (such as a partition add) it will rebalance the group
+	// picking up new partitions.
+	//
+	// Default: 5s
+	//
+	// Only used when GroupID is set and WatchPartitionChanges is set.
+	PartitionWatchInterval time.Duration
+
+	// WatchPartitionChanges is used to inform kafka-go that a consumer group should be
+	// polling the brokers and rebalancing if any partition changes happen to the topic.
+	WatchPartitionChanges bool
+
+	// SessionTimeout optionally sets the length of time that may pass without a heartbeat
+	// before the coordinator considers the consumer dead and initiates a rebalance.
+	//
+	// Default: 30s
+	//
+	// Only used when GroupID is set
+	SessionTimeout time.Duration
+
+	// RebalanceTimeout optionally sets the length of time the coordinator will wait
+	// for members to join as part of a rebalance. For kafka servers under higher
+	// load, it may be useful to set this value higher.
+	//
+	// Default: 30s
+	//
+	// Only used when GroupID is set
+	RebalanceTimeout time.Duration
+
+	// JoinGroupBackoff optionally sets the length of time to wait between re-joining
+	// the consumer group after an error.
+	//
+	// Default: 5s
+	JoinGroupBackoff time.Duration
+
+	// RetentionTime optionally sets the length of time the consumer group will
+	// be saved by the broker.
+	//
+	// Default: 24h
+	//
+	// Only used when GroupID is set
+	RetentionTime time.Duration
+
+	// StartOffset determines from whence the consumer group should begin
+	// consuming when it finds a partition without a committed offset. If
+	// non-zero, it must be set to one of FirstOffset or LastOffset.
+	//
+	// Default: FirstOffset
+	//
+	// Only used when GroupID is set
+	StartOffset int64
+
+	// ReadBackoffMin optionally sets the smallest amount of time the reader will wait before
+	// polling for new messages.
+	//
+	// Default: 100ms
+	ReadBackoffMin time.Duration
+
+	// ReadBackoffMax optionally sets the maximum amount of time the reader will wait before
+	// polling for new messages.
+	//
+	// Default: 1s
+	ReadBackoffMax time.Duration
+
+	// If not nil, specifies a logger used to report internal changes within the
+	// reader.
+	Logger Logger
+
+	// ErrorLogger is the logger used to report errors. If nil, the reader falls
+	// back to using Logger instead.
+	ErrorLogger Logger
+
+	// IsolationLevel controls the visibility of transactional records.
+	// ReadUncommitted makes all records visible. With ReadCommitted only
+	// non-transactional and committed records are visible.
+	IsolationLevel IsolationLevel
+
+	// Limit of how many attempts to connect will be made before returning the error.
+	//
+	// The default is to try 3 times.
+	MaxAttempts int
+
+	// OffsetOutOfRangeError indicates that the reader should return an error in
+	// the event of an OffsetOutOfRange error, rather than retrying indefinitely.
+	// This flag is being added to retain backwards-compatibility, so it will be
+	// removed in a future version of kafka-go.
+	OffsetOutOfRangeError bool
+}
+
+// Validate method validates ReaderConfig properties.
+func (config *ReaderConfig) Validate() error {
+	if len(config.Brokers) == 0 {
+		return errors.New("cannot create a new kafka reader with an empty list of broker addresses")
+	}
+
+	if config.Partition < 0 || config.Partition >= math.MaxInt32 {
+		return fmt.Errorf("partition number out of bounds: %d", config.Partition)
+	}
+
+	if config.MinBytes < 0 {
+		return fmt.Errorf("invalid negative minimum batch size (min = %d)", config.MinBytes)
+	}
+
+	if config.MaxBytes < 0 {
+		return fmt.Errorf("invalid negative maximum batch size (max = %d)", config.MaxBytes)
+	}
+
+	if config.GroupID != "" {
+		if config.Partition != 0 {
+			return errors.New("either Partition or GroupID may be specified, but not both")
+		}
+
+		if len(config.Topic) == 0 && len(config.GroupTopics) == 0 {
+			return errors.New("either Topic or GroupTopics must be specified with GroupID")
+		}
+	} else if len(config.Topic) == 0 {
+		return errors.New("cannot create a new kafka reader with an empty topic")
+	}
+
+	if config.MinBytes > config.MaxBytes {
+		return fmt.Errorf("minimum batch size greater than the maximum (min = %d, max = %d)", config.MinBytes, config.MaxBytes)
+	}
+
+	if config.ReadBackoffMax < 0 {
+		return fmt.Errorf("ReadBackoffMax out of bounds: %d", config.ReadBackoffMax)
+	}
+
+	if config.ReadBackoffMin < 0 {
+		return fmt.Errorf("ReadBackoffMin out of bounds: %d", config.ReadBackoffMin)
+	}
+
+	return nil
+}
+
+// ReaderStats is a data structure returned by a call to Reader.Stats that exposes
+// details about the behavior of the reader.
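+//
+// A hedged sketch of periodic reporting (the interval and log destination are
+// illustrative):
+//
+//	go func() {
+//		for range time.Tick(10 * time.Second) {
+//			log.Printf("kafka reader stats: %+v", r.Stats())
+//		}
+//	}()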
+type ReaderStats struct { + Dials int64 `metric:"kafka.reader.dial.count" type:"counter"` + Fetches int64 `metric:"kafka.reader.fetch.count" type:"counter"` + Messages int64 `metric:"kafka.reader.message.count" type:"counter"` + Bytes int64 `metric:"kafka.reader.message.bytes" type:"counter"` + Rebalances int64 `metric:"kafka.reader.rebalance.count" type:"counter"` + Timeouts int64 `metric:"kafka.reader.timeout.count" type:"counter"` + Errors int64 `metric:"kafka.reader.error.count" type:"counter"` + + DialTime DurationStats `metric:"kafka.reader.dial.seconds"` + ReadTime DurationStats `metric:"kafka.reader.read.seconds"` + WaitTime DurationStats `metric:"kafka.reader.wait.seconds"` + FetchSize SummaryStats `metric:"kafka.reader.fetch.size"` + FetchBytes SummaryStats `metric:"kafka.reader.fetch.bytes"` + + Offset int64 `metric:"kafka.reader.offset" type:"gauge"` + Lag int64 `metric:"kafka.reader.lag" type:"gauge"` + MinBytes int64 `metric:"kafka.reader.fetch_bytes.min" type:"gauge"` + MaxBytes int64 `metric:"kafka.reader.fetch_bytes.max" type:"gauge"` + MaxWait time.Duration `metric:"kafka.reader.fetch_wait.max" type:"gauge"` + QueueLength int64 `metric:"kafka.reader.queue.length" type:"gauge"` + QueueCapacity int64 `metric:"kafka.reader.queue.capacity" type:"gauge"` + + ClientID string `tag:"client_id"` + Topic string `tag:"topic"` + Partition string `tag:"partition"` + + // The original `Fetches` field had a typo where the metric name was called + // "kafak..." instead of "kafka...", in order to offer time to fix monitors + // that may be relying on this mistake we are temporarily introducing this + // field. + DeprecatedFetchesWithTypo int64 `metric:"kafak.reader.fetch.count" type:"counter"` +} + +// readerStats is a struct that contains statistics on a reader. +type readerStats struct { + dials counter + fetches counter + messages counter + bytes counter + rebalances counter + timeouts counter + errors counter + dialTime summary + readTime summary + waitTime summary + fetchSize summary + fetchBytes summary + offset gauge + lag gauge + partition string +} + +// NewReader creates and returns a new Reader configured with config. +// The offset is initialized to FirstOffset. 
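+// NewReader panics if the configuration fails validation (see
+// ReaderConfig.Validate). A minimal consumer-group sketch (the broker, group,
+// and topic names are illustrative):
+//
+//	r := kafka.NewReader(kafka.ReaderConfig{
+//		Brokers: []string{"localhost:9092"},
+//		GroupID: "example-group",
+//		Topic:   "example-topic",
+//	})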
+func NewReader(config ReaderConfig) *Reader {
+	if err := config.Validate(); err != nil {
+		panic(err)
+	}
+
+	if config.GroupID != "" {
+		if len(config.GroupBalancers) == 0 {
+			config.GroupBalancers = []GroupBalancer{
+				RangeGroupBalancer{},
+				RoundRobinGroupBalancer{},
+			}
+		}
+	}
+
+	if config.Dialer == nil {
+		config.Dialer = DefaultDialer
+	}
+
+	if config.MaxBytes == 0 {
+		config.MaxBytes = 1e6 // 1 MB
+	}
+
+	if config.MinBytes == 0 {
+		config.MinBytes = defaultFetchMinBytes
+	}
+
+	if config.MaxWait == 0 {
+		config.MaxWait = 10 * time.Second
+	}
+
+	if config.ReadBatchTimeout == 0 {
+		config.ReadBatchTimeout = 10 * time.Second
+	}
+
+	if config.ReadLagInterval == 0 {
+		config.ReadLagInterval = 1 * time.Minute
+	}
+
+	if config.ReadBackoffMin == 0 {
+		config.ReadBackoffMin = defaultReadBackoffMin
+	}
+
+	if config.ReadBackoffMax == 0 {
+		config.ReadBackoffMax = defaultReadBackoffMax
+	}
+
+	if config.ReadBackoffMax < config.ReadBackoffMin {
+		panic(fmt.Errorf("ReadBackoffMax %d smaller than ReadBackoffMin %d", config.ReadBackoffMax, config.ReadBackoffMin))
+	}
+
+	if config.QueueCapacity == 0 {
+		config.QueueCapacity = 100
+	}
+
+	if config.MaxAttempts == 0 {
+		config.MaxAttempts = 3
+	}
+
+	// when configured as a consumer group, stats should report a partition of -1
+	readerStatsPartition := config.Partition
+	if config.GroupID != "" {
+		readerStatsPartition = -1
+	}
+
+	// when configured as a consumer group, start the version at 1 to ensure that
+	// only the rebalance function will start readers
+	version := int64(0)
+	if config.GroupID != "" {
+		version = 1
+	}
+
+	stctx, stop := context.WithCancel(context.Background())
+	r := &Reader{
+		config:  config,
+		msgs:    make(chan readerMessage, config.QueueCapacity),
+		cancel:  func() {},
+		commits: make(chan commitRequest, config.QueueCapacity),
+		stop:    stop,
+		offset:  FirstOffset,
+		stctx:   stctx,
+		stats: &readerStats{
+			dialTime:   makeSummary(),
+			readTime:   makeSummary(),
+			waitTime:   makeSummary(),
+			fetchSize:  makeSummary(),
+			fetchBytes: makeSummary(),
+			// Generate the string representation of the partition number only
+			// once when the reader is created.
+			partition: strconv.Itoa(readerStatsPartition),
+		},
+		version: version,
+	}
+	if r.useConsumerGroup() {
+		r.done = make(chan struct{})
+		r.runError = make(chan error)
+		cg, err := NewConsumerGroup(ConsumerGroupConfig{
+			ID:                     r.config.GroupID,
+			Brokers:                r.config.Brokers,
+			Dialer:                 r.config.Dialer,
+			Topics:                 r.getTopics(),
+			GroupBalancers:         r.config.GroupBalancers,
+			HeartbeatInterval:      r.config.HeartbeatInterval,
+			PartitionWatchInterval: r.config.PartitionWatchInterval,
+			WatchPartitionChanges:  r.config.WatchPartitionChanges,
+			SessionTimeout:         r.config.SessionTimeout,
+			RebalanceTimeout:       r.config.RebalanceTimeout,
+			JoinGroupBackoff:       r.config.JoinGroupBackoff,
+			RetentionTime:          r.config.RetentionTime,
+			StartOffset:            r.config.StartOffset,
+			Logger:                 r.config.Logger,
+			ErrorLogger:            r.config.ErrorLogger,
+		})
+		if err != nil {
+			panic(err)
+		}
+		go r.run(cg)
+	}
+
+	return r
+}
+
+// Config returns the reader's configuration.
+func (r *Reader) Config() ReaderConfig {
+	return r.config
+}
+
+// Close closes the stream, preventing the program from reading any more
+// messages from it.
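+// A common pattern is to pair construction with a deferred close (a sketch;
+// config is assumed to be a valid ReaderConfig):
+//
+//	r := kafka.NewReader(config)
+//	defer r.Close()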
+func (r *Reader) Close() error {
+	atomic.StoreUint32(&r.once, 1)
+
+	r.mutex.Lock()
+	closed := r.closed
+	r.closed = true
+	r.mutex.Unlock()
+
+	r.cancel()
+	r.stop()
+	r.join.Wait()
+
+	if r.done != nil {
+		<-r.done
+	}
+
+	if !closed {
+		close(r.msgs)
+	}
+
+	return nil
+}
+
+// ReadMessage reads and returns the next message from r. The method call
+// blocks until a message becomes available, or an error occurs. The program
+// may also specify a context to asynchronously cancel the blocking operation.
+//
+// The method returns io.EOF to indicate that the reader has been closed.
+//
+// If consumer groups are used, ReadMessage will automatically commit the
+// offset when called. Note that this could result in an offset being committed
+// before the message is fully processed.
+//
+// If more fine-grained control of when offsets are committed is required, it
+// is recommended to use FetchMessage with CommitMessages instead.
+func (r *Reader) ReadMessage(ctx context.Context) (Message, error) {
+	m, err := r.FetchMessage(ctx)
+	if err != nil {
+		return Message{}, err
+	}
+
+	if r.useConsumerGroup() {
+		if err := r.CommitMessages(ctx, m); err != nil {
+			return Message{}, err
+		}
+	}
+
+	return m, nil
+}
+
+// FetchMessage reads and returns the next message from r. The method call
+// blocks until a message becomes available, or an error occurs. The program
+// may also specify a context to asynchronously cancel the blocking operation.
+//
+// The method returns io.EOF to indicate that the reader has been closed.
+//
+// FetchMessage does not commit offsets automatically when using consumer groups.
+// Use CommitMessages to commit the offset.
+func (r *Reader) FetchMessage(ctx context.Context) (Message, error) {
+	r.activateReadLag()
+
+	for {
+		r.mutex.Lock()
+
+		if !r.closed && r.version == 0 {
+			r.start(r.getTopicPartitionOffset())
+		}
+
+		version := r.version
+		r.mutex.Unlock()
+
+		select {
+		case <-ctx.Done():
+			return Message{}, ctx.Err()
+
+		case err := <-r.runError:
+			return Message{}, err
+
+		case m, ok := <-r.msgs:
+			if !ok {
+				return Message{}, io.EOF
+			}
+
+			if m.version >= version {
+				r.mutex.Lock()
+
+				switch {
+				case m.error != nil:
+				case version == r.version:
+					r.offset = m.message.Offset + 1
+					r.lag = m.watermark - r.offset
+				}
+
+				r.mutex.Unlock()
+
+				if errors.Is(m.error, io.EOF) {
+					// io.EOF is used as a marker to indicate that the stream
+					// has been closed, so if it was received from the inner
+					// reader we replace it with io.ErrUnexpectedEOF to avoid
+					// confusing the program.
+					m.error = io.ErrUnexpectedEOF
+				}
+
+				return m.message, m.error
+			}
+		}
+	}
+}
+
+// CommitMessages commits the list of messages passed as argument. The program
+// may pass a context to asynchronously cancel the commit operation when it was
+// configured to be blocking.
+//
+// Because kafka consumer groups track a single offset per partition, the
+// highest message offset passed to CommitMessages will cause all previous
+// messages to be committed. Applications need to account for these Kafka
+// limitations when committing messages, and maintain message ordering if they
+// need strong delivery guarantees. This property makes it valid to pass only
+// the last message seen to CommitMessages in order to move the offset of the
+// topic/partition it belonged to forward, effectively committing all previous
+// messages in the partition.
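+//
+// A hedged sketch of an at-least-once consume loop (the process function is
+// hypothetical and error handling is elided):
+//
+//	for {
+//		m, err := r.FetchMessage(ctx)
+//		if err != nil {
+//			break
+//		}
+//		process(m)
+//		if err := r.CommitMessages(ctx, m); err != nil {
+//			break
+//		}
+//	}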
+func (r *Reader) CommitMessages(ctx context.Context, msgs ...Message) error { + if !r.useConsumerGroup() { + return errOnlyAvailableWithGroup + } + + var errch <-chan error + creq := commitRequest{ + commits: makeCommits(msgs...), + } + + if r.useSyncCommits() { + ch := make(chan error, 1) + errch, creq.errch = ch, ch + } + + select { + case r.commits <- creq: + case <-ctx.Done(): + return ctx.Err() + case <-r.stctx.Done(): + // This context is used to ensure we don't allow commits after the + // reader was closed. + return io.ErrClosedPipe + } + + if !r.useSyncCommits() { + return nil + } + + select { + case <-ctx.Done(): + return ctx.Err() + case err := <-errch: + return err + } +} + +// ReadLag returns the current lag of the reader by fetching the last offset of +// the topic and partition and computing the difference between that value and +// the offset of the last message returned by ReadMessage. +// +// This method is intended to be used in cases where a program may be unable to +// call ReadMessage to update the value returned by Lag, but still needs to get +// an up to date estimation of how far behind the reader is. For example when +// the consumer is not ready to process the next message. +// +// The function returns a lag of zero when the reader's current offset is +// negative. +func (r *Reader) ReadLag(ctx context.Context) (lag int64, err error) { + if r.useConsumerGroup() { + return 0, errNotAvailableWithGroup + } + + type offsets struct { + first int64 + last int64 + } + + offch := make(chan offsets, 1) + errch := make(chan error, 1) + + go func() { + var off offsets + var err error + + for _, broker := range r.config.Brokers { + var conn *Conn + + if conn, err = r.config.Dialer.DialLeader(ctx, "tcp", broker, r.config.Topic, r.config.Partition); err != nil { + continue + } + + deadline, _ := ctx.Deadline() + conn.SetDeadline(deadline) + + off.first, off.last, err = conn.ReadOffsets() + conn.Close() + + if err == nil { + break + } + } + + if err != nil { + errch <- err + } else { + offch <- off + } + }() + + select { + case off := <-offch: + switch cur := r.Offset(); { + case cur == FirstOffset: + lag = off.last - off.first + + case cur == LastOffset: + lag = 0 + + default: + lag = off.last - cur + } + case err = <-errch: + case <-ctx.Done(): + err = ctx.Err() + } + + return +} + +// Offset returns the current absolute offset of the reader, or -1 +// if r is backed by a consumer group. +func (r *Reader) Offset() int64 { + if r.useConsumerGroup() { + return -1 + } + + r.mutex.Lock() + offset := r.offset + r.mutex.Unlock() + r.withLogger(func(log Logger) { + log.Printf("looking up offset of kafka reader for partition %d of %s: %s", r.config.Partition, r.config.Topic, toHumanOffset(offset)) + }) + return offset +} + +// Lag returns the lag of the last message returned by ReadMessage, or -1 +// if r is backed by a consumer group. +func (r *Reader) Lag() int64 { + if r.useConsumerGroup() { + return -1 + } + + r.mutex.Lock() + lag := r.lag + r.mutex.Unlock() + return lag +} + +// SetOffset changes the offset from which the next batch of messages will be +// read. The method fails with io.ErrClosedPipe if the reader has already been closed. +// +// From version 0.2.0, FirstOffset and LastOffset can be used to indicate the first +// or last available offset in the partition. 
Please note while -1 and -2 were accepted
+// to indicate the first or last offset in previous versions, the meanings of the numbers
+// were swapped in 0.2.0 to match the meanings in other libraries and the Kafka protocol
+// specification.
+func (r *Reader) SetOffset(offset int64) error {
+	if r.useConsumerGroup() {
+		return errNotAvailableWithGroup
+	}
+
+	var err error
+	r.mutex.Lock()
+
+	if r.closed {
+		err = io.ErrClosedPipe
+	} else if offset != r.offset {
+		r.withLogger(func(log Logger) {
+			log.Printf("setting the offset of the kafka reader for partition %d of %s from %s to %s",
+				r.config.Partition, r.config.Topic, toHumanOffset(r.offset), toHumanOffset(offset))
+		})
+		r.offset = offset
+
+		if r.version != 0 {
+			r.start(r.getTopicPartitionOffset())
+		}
+
+		r.activateReadLag()
+	}
+
+	r.mutex.Unlock()
+	return err
+}
+
+// SetOffsetAt changes the offset from which the next batch of messages will be
+// read given the timestamp t.
+//
+// The method fails if it is unable to connect to the partition leader, if it is
+// unable to read the offset given the timestamp, or if the reader has been closed.
+func (r *Reader) SetOffsetAt(ctx context.Context, t time.Time) error {
+	r.mutex.Lock()
+	if r.closed {
+		r.mutex.Unlock()
+		return io.ErrClosedPipe
+	}
+	r.mutex.Unlock()
+
+	if len(r.config.Brokers) < 1 {
+		return errors.New("no brokers in config")
+	}
+	var conn *Conn
+	var err error
+	for _, broker := range r.config.Brokers {
+		conn, err = r.config.Dialer.DialLeader(ctx, "tcp", broker, r.config.Topic, r.config.Partition)
+		if err != nil {
+			continue
+		}
+		deadline, _ := ctx.Deadline()
+		conn.SetDeadline(deadline)
+		offset, err := conn.ReadOffset(t)
+		conn.Close()
+		if err != nil {
+			return err
+		}
+
+		return r.SetOffset(offset)
+	}
+	return fmt.Errorf("error dialing all brokers, one of the errors: %w", err)
+}
+
+// Stats returns a snapshot of the reader stats since the last time the method
+// was called, or since the reader was created if it is called for the first
+// time.
+//
+// A typical use of this method is to spawn a goroutine that will periodically
+// call Stats on a kafka reader and report the metrics to a stats collection
+// system.
+func (r *Reader) Stats() ReaderStats {
+	stats := ReaderStats{
+		Dials:         r.stats.dials.snapshot(),
+		Fetches:       r.stats.fetches.snapshot(),
+		Messages:      r.stats.messages.snapshot(),
+		Bytes:         r.stats.bytes.snapshot(),
+		Rebalances:    r.stats.rebalances.snapshot(),
+		Timeouts:      r.stats.timeouts.snapshot(),
+		Errors:        r.stats.errors.snapshot(),
+		DialTime:      r.stats.dialTime.snapshotDuration(),
+		ReadTime:      r.stats.readTime.snapshotDuration(),
+		WaitTime:      r.stats.waitTime.snapshotDuration(),
+		FetchSize:     r.stats.fetchSize.snapshot(),
+		FetchBytes:    r.stats.fetchBytes.snapshot(),
+		Offset:        r.stats.offset.snapshot(),
+		Lag:           r.stats.lag.snapshot(),
+		MinBytes:      int64(r.config.MinBytes),
+		MaxBytes:      int64(r.config.MaxBytes),
+		MaxWait:       r.config.MaxWait,
+		QueueLength:   int64(len(r.msgs)),
+		QueueCapacity: int64(cap(r.msgs)),
+		ClientID:      r.config.Dialer.ClientID,
+		Topic:         r.config.Topic,
+		Partition:     r.stats.partition,
+	}
+	// TODO: remove when we get rid of the deprecated field.
+ stats.DeprecatedFetchesWithTypo = stats.Fetches + return stats +} + +func (r *Reader) getTopicPartitionOffset() map[topicPartition]int64 { + key := topicPartition{topic: r.config.Topic, partition: int32(r.config.Partition)} + return map[topicPartition]int64{key: r.offset} +} + +func (r *Reader) withLogger(do func(Logger)) { + if r.config.Logger != nil { + do(r.config.Logger) + } +} + +func (r *Reader) withErrorLogger(do func(Logger)) { + if r.config.ErrorLogger != nil { + do(r.config.ErrorLogger) + } else { + r.withLogger(do) + } +} + +func (r *Reader) activateReadLag() { + if r.config.ReadLagInterval > 0 && atomic.CompareAndSwapUint32(&r.once, 0, 1) { + // read lag will only be calculated when not using consumer groups + // todo discuss how capturing read lag should interact with rebalancing + if !r.useConsumerGroup() { + go r.readLag(r.stctx) + } + } +} + +func (r *Reader) readLag(ctx context.Context) { + ticker := time.NewTicker(r.config.ReadLagInterval) + defer ticker.Stop() + + for { + timeout, cancel := context.WithTimeout(ctx, r.config.ReadLagInterval/2) + lag, err := r.ReadLag(timeout) + cancel() + + if err != nil { + r.stats.errors.observe(1) + r.withErrorLogger(func(log Logger) { + log.Printf("kafka reader failed to read lag of partition %d of %s: %s", r.config.Partition, r.config.Topic, err) + }) + } else { + r.stats.lag.observe(lag) + } + + select { + case <-ticker.C: + case <-ctx.Done(): + return + } + } +} + +func (r *Reader) start(offsetsByPartition map[topicPartition]int64) { + if r.closed { + // don't start child reader if parent Reader is closed + return + } + + ctx, cancel := context.WithCancel(context.Background()) + + r.cancel() // always cancel the previous reader + r.cancel = cancel + r.version++ + + r.join.Add(len(offsetsByPartition)) + for key, offset := range offsetsByPartition { + go func(ctx context.Context, key topicPartition, offset int64, join *sync.WaitGroup) { + defer join.Done() + + (&reader{ + dialer: r.config.Dialer, + logger: r.config.Logger, + errorLogger: r.config.ErrorLogger, + brokers: r.config.Brokers, + topic: key.topic, + partition: int(key.partition), + minBytes: r.config.MinBytes, + maxBytes: r.config.MaxBytes, + maxWait: r.config.MaxWait, + readBatchTimeout: r.config.ReadBatchTimeout, + backoffDelayMin: r.config.ReadBackoffMin, + backoffDelayMax: r.config.ReadBackoffMax, + version: r.version, + msgs: r.msgs, + stats: r.stats, + isolationLevel: r.config.IsolationLevel, + maxAttempts: r.config.MaxAttempts, + + // backwards-compatibility flags + offsetOutOfRangeError: r.config.OffsetOutOfRangeError, + }).run(ctx, offset) + }(ctx, key, offset, &r.join) + } +} + +// A reader reads messages from kafka and produces them on its channels, it's +// used as a way to asynchronously fetch messages while the main program reads +// them using the high level reader API. 
+type reader struct {
+	dialer           *Dialer
+	logger           Logger
+	errorLogger      Logger
+	brokers          []string
+	topic            string
+	partition        int
+	minBytes         int
+	maxBytes         int
+	maxWait          time.Duration
+	readBatchTimeout time.Duration
+	backoffDelayMin  time.Duration
+	backoffDelayMax  time.Duration
+	version          int64
+	msgs             chan<- readerMessage
+	stats            *readerStats
+	isolationLevel   IsolationLevel
+	maxAttempts      int
+
+	offsetOutOfRangeError bool
+}
+
+type readerMessage struct {
+	version   int64
+	message   Message
+	watermark int64
+	error     error
+}
+
+func (r *reader) run(ctx context.Context, offset int64) {
+	// This is the reader's main loop, it only ends if the context is canceled
+	// and will keep attempting to read messages otherwise.
+	//
+	// Retrying indefinitely has the nice side effect of preventing Read calls
+	// on the parent reader from blocking if connection to the kafka server fails;
+	// the reader keeps reporting errors on the error channel which will then
+	// be surfaced to the program.
+	// If the reader wasn't retrying then the program would block indefinitely
+	// on a Read call after reading the first error.
+	for attempt := 0; true; attempt++ {
+		if attempt != 0 {
+			if !sleep(ctx, backoff(attempt, r.backoffDelayMin, r.backoffDelayMax)) {
+				return
+			}
+		}
+
+		r.withLogger(func(log Logger) {
+			log.Printf("initializing kafka reader for partition %d of %s starting at offset %d", r.partition, r.topic, toHumanOffset(offset))
+		})
+
+		conn, start, err := r.initialize(ctx, offset)
+		if err != nil {
+			if errors.Is(err, OffsetOutOfRange) {
+				if r.offsetOutOfRangeError {
+					r.sendError(ctx, err)
+					return
+				}
+
+				// This would happen if the requested offset is past the last
+				// offset on the partition leader. In that case we're just going
+				// to retry later hoping that enough data has been produced.
+				r.withErrorLogger(func(log Logger) {
+					log.Printf("error initializing the kafka reader for partition %d of %s: %s", r.partition, r.topic, err)
+				})
+
+				continue
+			}
+
+			// Perform a configured number of attempts before
+			// reporting first errors, this helps mitigate
+			// situations where the kafka server is temporarily
+			// unavailable.
+			if attempt >= r.maxAttempts {
+				r.sendError(ctx, err)
+			} else {
+				r.stats.errors.observe(1)
+				r.withErrorLogger(func(log Logger) {
+					log.Printf("error initializing the kafka reader for partition %d of %s: %s", r.partition, r.topic, err)
+				})
+			}
+			continue
+		}
+
+		// Resetting the attempt counter ensures that if a failure occurs after
+		// a successful initialization we don't keep increasing the backoff
+		// timeout.
+		attempt = 0
+
+		// Now we're sure to have an absolute offset number; should anything
+		// happen to the connection we know we'll want to restart from this
+		// offset.
+		offset = start
+
+		errcount := 0
+	readLoop:
+		for {
+			if !sleep(ctx, backoff(errcount, r.backoffDelayMin, r.backoffDelayMax)) {
+				conn.Close()
+				return
+			}
+
+			offset, err = r.read(ctx, offset, conn)
+			switch {
+			case err == nil:
+				errcount = 0
+				continue
+
+			case errors.Is(err, io.EOF):
+				// done with this batch of messages...carry on. note that this
+				// block relies on the batch repackaging real io.EOF errors as
+				// io.ErrUnexpectedEOF. otherwise, we would end up swallowing real
+				// errors here.
+				errcount = 0
+				continue
+
+			case errors.Is(err, io.ErrNoProgress):
+				// This error is returned by the Conn when it believes the connection
+				// has been corrupted, so we need to explicitly close it. Since we are
+				// explicitly handling it, and a retry will pick up from here, we can
+				// suppress the error metrics and logs for this case.
+				conn.Close()
+				break readLoop
+
+			case errors.Is(err, UnknownTopicOrPartition):
+				r.withErrorLogger(func(log Logger) {
+					log.Printf("failed to read from current broker %v for partition %d of %s at offset %d: %v", r.brokers, r.partition, r.topic, toHumanOffset(offset), err)
+				})
+
+				conn.Close()
+
+				// The next call to .initialize will re-establish a connection to the proper
+				// topic/partition broker combo.
+				r.stats.rebalances.observe(1)
+				break readLoop
+
+			case errors.Is(err, NotLeaderForPartition):
+				r.withErrorLogger(func(log Logger) {
+					log.Printf("failed to read from current broker for partition %d of %s at offset %d: %v", r.partition, r.topic, toHumanOffset(offset), err)
+				})
+
+				conn.Close()
+
+				// The next call to .initialize will re-establish a connection to the proper
+				// partition leader.
+				r.stats.rebalances.observe(1)
+				break readLoop
+
+			case errors.Is(err, RequestTimedOut):
+				// Timeout on the kafka side, this can be safely retried.
+				errcount = 0
+				r.withLogger(func(log Logger) {
+					log.Printf("no messages received from kafka within the allocated time for partition %d of %s at offset %d: %v", r.partition, r.topic, toHumanOffset(offset), err)
+				})
+				r.stats.timeouts.observe(1)
+				continue
+
+			case errors.Is(err, OffsetOutOfRange):
+				first, last, err := r.readOffsets(conn)
+				if err != nil {
+					r.withErrorLogger(func(log Logger) {
+						log.Printf("the kafka reader got an error while attempting to determine whether it was reading before the first offset or after the last offset of partition %d of %s: %s", r.partition, r.topic, err)
+					})
+					conn.Close()
+					break readLoop
+				}
+
+				switch {
+				case offset < first:
+					r.withErrorLogger(func(log Logger) {
+						log.Printf("the kafka reader is reading before the first offset for partition %d of %s, skipping from offset %d to %d (%d messages)", r.partition, r.topic, toHumanOffset(offset), first, first-offset)
+					})
+					offset, errcount = first, 0
+					continue // retry immediately so we don't keep falling behind due to the backoff
+
+				case offset < last:
+					errcount = 0
+					continue // more messages have already become available, retry immediately
+
+				default:
+					// We may be reading past the last offset, will retry later.
+					r.withErrorLogger(func(log Logger) {
+						log.Printf("the kafka reader is reading past the last offset for partition %d of %s at offset %d", r.partition, r.topic, toHumanOffset(offset))
+					})
+				}
+
+			case errors.Is(err, context.Canceled):
+				// Another reader has taken over, we can safely quit.
+				conn.Close()
+				return
+
+			case errors.Is(err, errUnknownCodec):
+				// The compression codec is either unsupported or has not been
+				// imported. This is a fatal error b/c the reader cannot
+				// proceed.
+ r.sendError(ctx, err) + break readLoop + + default: + var kafkaError Error + if errors.As(err, &kafkaError) { + r.sendError(ctx, err) + } else { + r.withErrorLogger(func(log Logger) { + log.Printf("the kafka reader got an unknown error reading partition %d of %s at offset %d: %s", r.partition, r.topic, toHumanOffset(offset), err) + }) + r.stats.errors.observe(1) + conn.Close() + break readLoop + } + } + + errcount++ + } + } +} + +func (r *reader) initialize(ctx context.Context, offset int64) (conn *Conn, start int64, err error) { + for i := 0; i != len(r.brokers) && conn == nil; i++ { + broker := r.brokers[i] + var first, last int64 + + t0 := time.Now() + conn, err = r.dialer.DialLeader(ctx, "tcp", broker, r.topic, r.partition) + t1 := time.Now() + r.stats.dials.observe(1) + r.stats.dialTime.observeDuration(t1.Sub(t0)) + + if err != nil { + continue + } + + if first, last, err = r.readOffsets(conn); err != nil { + conn.Close() + conn = nil + break + } + + switch { + case offset == FirstOffset: + offset = first + + case offset == LastOffset: + offset = last + + case offset < first: + offset = first + } + + r.withLogger(func(log Logger) { + log.Printf("the kafka reader for partition %d of %s is seeking to offset %d", r.partition, r.topic, toHumanOffset(offset)) + }) + + if start, err = conn.Seek(offset, SeekAbsolute); err != nil { + conn.Close() + conn = nil + break + } + + conn.SetDeadline(time.Time{}) + } + + return +} + +func (r *reader) read(ctx context.Context, offset int64, conn *Conn) (int64, error) { + r.stats.fetches.observe(1) + r.stats.offset.observe(offset) + + t0 := time.Now() + conn.SetReadDeadline(t0.Add(r.maxWait)) + + batch := conn.ReadBatchWith(ReadBatchConfig{ + MinBytes: r.minBytes, + MaxBytes: r.maxBytes, + IsolationLevel: r.isolationLevel, + }) + highWaterMark := batch.HighWaterMark() + + t1 := time.Now() + r.stats.waitTime.observeDuration(t1.Sub(t0)) + + var msg Message + var err error + var size int64 + var bytes int64 + + for { + conn.SetReadDeadline(time.Now().Add(r.readBatchTimeout)) + + if msg, err = batch.ReadMessage(); err != nil { + batch.Close() + break + } + + n := int64(len(msg.Key) + len(msg.Value)) + r.stats.messages.observe(1) + r.stats.bytes.observe(n) + + if err = r.sendMessage(ctx, msg, highWaterMark); err != nil { + batch.Close() + break + } + + offset = msg.Offset + 1 + r.stats.offset.observe(offset) + r.stats.lag.observe(highWaterMark - offset) + + size++ + bytes += n + } + + conn.SetReadDeadline(time.Time{}) + + t2 := time.Now() + r.stats.readTime.observeDuration(t2.Sub(t1)) + r.stats.fetchSize.observe(size) + r.stats.fetchBytes.observe(bytes) + return offset, err +} + +func (r *reader) readOffsets(conn *Conn) (first, last int64, err error) { + conn.SetDeadline(time.Now().Add(10 * time.Second)) + return conn.ReadOffsets() +} + +func (r *reader) sendMessage(ctx context.Context, msg Message, watermark int64) error { + select { + case r.msgs <- readerMessage{version: r.version, message: msg, watermark: watermark}: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func (r *reader) sendError(ctx context.Context, err error) error { + select { + case r.msgs <- readerMessage{version: r.version, error: err}: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func (r *reader) withLogger(do func(Logger)) { + if r.logger != nil { + do(r.logger) + } +} + +func (r *reader) withErrorLogger(do func(Logger)) { + if r.errorLogger != nil { + do(r.errorLogger) + } else { + r.withLogger(do) + } +} + +// extractTopics returns the unique list 
of topics represented by the set of
+// provided members.
+func extractTopics(members []GroupMember) []string {
+	visited := map[string]struct{}{}
+	var topics []string
+
+	for _, member := range members {
+		for _, topic := range member.Topics {
+			if _, seen := visited[topic]; seen {
+				continue
+			}
+
+			topics = append(topics, topic)
+			visited[topic] = struct{}{}
+		}
+	}
+
+	sort.Strings(topics)
+
+	return topics
+}
+
+type humanOffset int64
+
+func toHumanOffset(v int64) humanOffset {
+	return humanOffset(v)
+}
+
+func (offset humanOffset) Format(w fmt.State, _ rune) {
+	v := int64(offset)
+	switch v {
+	case FirstOffset:
+		fmt.Fprint(w, "first offset")
+	case LastOffset:
+		fmt.Fprint(w, "last offset")
+	default:
+		fmt.Fprint(w, strconv.FormatInt(v, 10))
+	}
+}
diff --git a/vendor/github.com/segmentio/kafka-go/record.go b/vendor/github.com/segmentio/kafka-go/record.go
new file mode 100644
index 00000000000..1750889ac1a
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/record.go
@@ -0,0 +1,42 @@
+package kafka
+
+import (
+	"github.com/segmentio/kafka-go/protocol"
+)
+
+// Header is a key/value pair type representing headers set on records.
+type Header = protocol.Header
+
+// Bytes is an interface representing a sequence of bytes. This abstraction
+// makes it possible for programs to inject data into produce requests without
+// having to load it into an intermediary buffer, or read record keys and values
+// from a fetch response directly from internal buffers.
+//
+// Bytes are not safe to use concurrently from multiple goroutines.
+type Bytes = protocol.Bytes
+
+// NewBytes constructs a Bytes value from a byte slice.
+//
+// If b is nil, nil is returned.
+func NewBytes(b []byte) Bytes { return protocol.NewBytes(b) }
+
+// ReadAll reads b into a byte slice.
+func ReadAll(b Bytes) ([]byte, error) { return protocol.ReadAll(b) }
+
+// Record is an interface representing a single kafka record.
+//
+// Record values are not safe to use concurrently from multiple goroutines.
+type Record = protocol.Record
+
+// RecordReader is an interface representing a sequence of records. Record sets
+// are used in both produce and fetch requests to represent the sequence of
+// records that are sent to or received from kafka brokers.
+//
+// RecordReader values are not safe to use concurrently from multiple goroutines.
+type RecordReader = protocol.RecordReader
+
+// NewRecordReader constructs a RecordReader which exposes the sequence of
+// records passed as arguments.
+func NewRecordReader(records ...Record) RecordReader {
+	return protocol.NewRecordReader(records...)
+} diff --git a/vendor/github.com/segmentio/kafka-go/recordbatch.go b/vendor/github.com/segmentio/kafka-go/recordbatch.go new file mode 100644 index 00000000000..59ab4937bb3 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/recordbatch.go @@ -0,0 +1,108 @@ +package kafka + +import ( + "bytes" + "time" +) + +const recordBatchHeaderSize int32 = 0 + + 8 + // base offset + 4 + // batch length + 4 + // partition leader epoch + 1 + // magic + 4 + // crc + 2 + // attributes + 4 + // last offset delta + 8 + // first timestamp + 8 + // max timestamp + 8 + // producer id + 2 + // producer epoch + 4 + // base sequence + 4 // msg count + +func recordBatchSize(msgs ...Message) (size int32) { + size = recordBatchHeaderSize + baseTime := msgs[0].Time + + for i := range msgs { + msg := &msgs[i] + msz := recordSize(msg, msg.Time.Sub(baseTime), int64(i)) + size += int32(msz + varIntLen(int64(msz))) + } + + return +} + +func compressRecordBatch(codec CompressionCodec, msgs ...Message) (compressed *bytes.Buffer, attributes int16, size int32, err error) { + compressed = acquireBuffer() + compressor := codec.NewWriter(compressed) + wb := &writeBuffer{w: compressor} + + for i, msg := range msgs { + wb.writeRecord(0, msgs[0].Time, int64(i), msg) + } + + if err = compressor.Close(); err != nil { + releaseBuffer(compressed) + return + } + + attributes = int16(codec.Code()) + size = recordBatchHeaderSize + int32(compressed.Len()) + return +} + +type recordBatch struct { + // required input parameters + codec CompressionCodec + attributes int16 + msgs []Message + + // parameters calculated during init + compressed *bytes.Buffer + size int32 +} + +func newRecordBatch(codec CompressionCodec, msgs ...Message) (r *recordBatch, err error) { + r = &recordBatch{ + codec: codec, + msgs: msgs, + } + if r.codec == nil { + r.size = recordBatchSize(r.msgs...) + } else { + r.compressed, r.attributes, r.size, err = compressRecordBatch(r.codec, r.msgs...) + } + return +} + +func (r *recordBatch) writeTo(wb *writeBuffer) { + wb.writeInt32(r.size) + + baseTime := r.msgs[0].Time + lastTime := r.msgs[len(r.msgs)-1].Time + if r.compressed != nil { + wb.writeRecordBatch(r.attributes, r.size, len(r.msgs), baseTime, lastTime, func(wb *writeBuffer) { + wb.Write(r.compressed.Bytes()) + }) + releaseBuffer(r.compressed) + } else { + wb.writeRecordBatch(r.attributes, r.size, len(r.msgs), baseTime, lastTime, func(wb *writeBuffer) { + for i, msg := range r.msgs { + wb.writeRecord(0, r.msgs[0].Time, int64(i), msg) + } + }) + } +} + +func recordSize(msg *Message, timestampDelta time.Duration, offsetDelta int64) int { + return 1 + // attributes + varIntLen(int64(milliseconds(timestampDelta))) + + varIntLen(offsetDelta) + + varBytesLen(msg.Key) + + varBytesLen(msg.Value) + + varArrayLen(len(msg.Headers), func(i int) int { + h := &msg.Headers[i] + return varStringLen(h.Key) + varBytesLen(h.Value) + }) +} diff --git a/vendor/github.com/segmentio/kafka-go/resolver.go b/vendor/github.com/segmentio/kafka-go/resolver.go new file mode 100644 index 00000000000..fa5b97d7014 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/resolver.go @@ -0,0 +1,57 @@ +package kafka + +import ( + "context" + "net" +) + +// The Resolver interface is used as an abstraction to provide service discovery +// of the hosts of a kafka cluster. +type Resolver interface { + // LookupHost looks up the given host using the local resolver. + // It returns a slice of that host's addresses. 
+ LookupHost(ctx context.Context, host string) (addrs []string, err error) +} + +// BrokerResolver is an interface implemented by types that translate host +// names into a network address. +// +// This resolver is not intended to be a general purpose interface. Instead, +// it is tailored to the particular needs of the kafka protocol, with the goal +// being to provide a flexible mechanism for extending broker name resolution +// while retaining context that is specific to interacting with a kafka cluster. +// +// Resolvers must be safe to use from multiple goroutines. +type BrokerResolver interface { + // Returns the IP addresses of the broker passed as argument. + LookupBrokerIPAddr(ctx context.Context, broker Broker) ([]net.IPAddr, error) +} + +// NewBrokerResolver constructs a Resolver from r. +// +// If r is nil, net.DefaultResolver is used instead. +func NewBrokerResolver(r *net.Resolver) BrokerResolver { + return brokerResolver{r} +} + +type brokerResolver struct { + *net.Resolver +} + +func (r brokerResolver) LookupBrokerIPAddr(ctx context.Context, broker Broker) ([]net.IPAddr, error) { + ipAddrs, err := r.LookupIPAddr(ctx, broker.Host) + if err != nil { + return nil, err + } + + if len(ipAddrs) == 0 { + return nil, &net.DNSError{ + Err: "no addresses were returned by the resolver", + Name: broker.Host, + IsTemporary: true, + IsNotFound: true, + } + } + + return ipAddrs, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/resource.go b/vendor/github.com/segmentio/kafka-go/resource.go new file mode 100644 index 00000000000..f5c2e73a526 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/resource.go @@ -0,0 +1,37 @@ +package kafka + +// https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java +type ResourceType int8 + +const ( + ResourceTypeUnknown ResourceType = 0 + ResourceTypeAny ResourceType = 1 + ResourceTypeTopic ResourceType = 2 + ResourceTypeGroup ResourceType = 3 + // See https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/config/ConfigResource.java#L36 + ResourceTypeBroker ResourceType = 4 + ResourceTypeCluster ResourceType = 4 + ResourceTypeTransactionalID ResourceType = 5 + ResourceTypeDelegationToken ResourceType = 6 +) + +// https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java +type PatternType int8 + +const ( + // PatternTypeUnknown represents any PatternType which this client cannot + // understand. + PatternTypeUnknown PatternType = 0 + // PatternTypeAny matches any resource pattern type. + PatternTypeAny PatternType = 1 + // PatternTypeMatch perform pattern matching. + PatternTypeMatch PatternType = 2 + // PatternTypeLiteral represents a literal name. + // A literal name defines the full name of a resource, e.g. topic with name + // 'foo', or group with name 'bob'. + PatternTypeLiteral PatternType = 3 + // PatternTypePrefixed represents a prefixed name. + // A prefixed name defines a prefix for a resource, e.g. topics with names + // that start with 'foo'. 
+	PatternTypePrefixed PatternType = 4
+)
diff --git a/vendor/github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2/LICENSE b/vendor/github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2/LICENSE
new file mode 100644
index 00000000000..09e136c5100
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 Segment
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2/README.md b/vendor/github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2/README.md
new file mode 100644
index 00000000000..e3b6c537bc6
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2/README.md
@@ -0,0 +1,15 @@
+# AWS MSK IAM V2
+
+This extension provides the capability to authenticate with
+[Amazon Managed Streaming for Apache Kafka (MSK)](https://aws.amazon.com/msk/) through AWS IAM.
+
+## How to use
+
+This module is an extension for MSK users and is therefore isolated from the `kafka-go` module.
+You can add this module to your dependencies by running the command below.
+
+```shell
+go get github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2
+```
+
+See the sample code in [example_test.go](./example_test.go); you can use the `Mechanism` for SASL authentication of both `Reader` and `Writer`.
diff --git a/vendor/github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2/msk_iam.go b/vendor/github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2/msk_iam.go
new file mode 100644
index 00000000000..7fa49337f13
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2/msk_iam.go
@@ -0,0 +1,180 @@
+package aws_msk_iam_v2
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"net/http"
+	"net/url"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/segmentio/kafka-go/sasl"
+)
+
+const (
+	// These constants come from https://github.com/aws/aws-msk-iam-auth#details and
+	// https://github.com/aws/aws-msk-iam-auth/blob/main/src/main/java/software/amazon/msk/auth/iam/internals/AWS4SignedPayloadGenerator.java.
diff --git a/vendor/github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2/msk_iam.go b/vendor/github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2/msk_iam.go
new file mode 100644
index 00000000000..7fa49337f13
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2/msk_iam.go
@@ -0,0 +1,180 @@
+package aws_msk_iam_v2
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"net/http"
+	"net/url"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/segmentio/kafka-go/sasl"
+)
+
+const (
+	// These constants come from https://github.com/aws/aws-msk-iam-auth#details and
+	// https://github.com/aws/aws-msk-iam-auth/blob/main/src/main/java/software/amazon/msk/auth/iam/internals/AWS4SignedPayloadGenerator.java.
+	signAction       = "kafka-cluster:Connect"
+	signPayload      = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" // the hex encoded SHA-256 of an empty string
+	signService      = "kafka-cluster"
+	signVersion      = "2020_10_22"
+	signActionKey    = "action"
+	signHostKey      = "host"
+	signUserAgentKey = "user-agent"
+	signVersionKey   = "version"
+	queryActionKey   = "Action"
+	queryExpiryKey   = "X-Amz-Expires"
+)
+
+var signUserAgent = "kafka-go/sasl/aws_msk_iam_v2/" + runtime.Version()
+
+// Mechanism implements sasl.Mechanism for the AWS_MSK_IAM mechanism, based on the official java implementation:
+// https://github.com/aws/aws-msk-iam-auth
+type Mechanism struct {
+	// The sigv4.Signer of aws-sdk-go-v2 to use when signing the request. Required.
+	Signer *signer.Signer
+	// The aws.Config.Credentials or config.CredentialsProvider of aws-sdk-go-v2. Required.
+	Credentials aws.CredentialsProvider
+	// The region where the MSK cluster is hosted, e.g. "us-east-1". Required.
+	Region string
+	// The time the request is planned for. Optional, defaults to time.Now() at time of authentication.
+	SignTime time.Time
+	// The duration for which the presigned request is active. Optional, defaults to 5 minutes.
+	Expiry time.Duration
+}
+
+func (m *Mechanism) Name() string {
+	return "AWS_MSK_IAM"
+}
+
+func (m *Mechanism) Next(ctx context.Context, challenge []byte) (bool, []byte, error) {
+	// After the initial step, the authentication is complete; kafka will
+	// return an error if it rejected the credentials, so we'll only arrive
+	// here on success.
+	return true, nil, nil
+}
+
+// Start produces the authentication values required for AWS_MSK_IAM. It produces the following JSON as a byte array,
+// making use of the aws-sdk to produce the signed output.
+//
+//	{
+//	  "version" : "2020_10_22",
+//	  "host" : "<broker host>",
+//	  "user-agent": "<user agent string>",
+//	  "action": "kafka-cluster:Connect",
+//	  "x-amz-algorithm" : "<algorithm>",
+//	  "x-amz-credential" : "<access key id>/<date>/<region>/kafka-cluster/aws4_request",
+//	  "x-amz-date" : "<timestamp>",
+//	  "x-amz-security-token" : "<session token, if any>",
+//	  "x-amz-signedheaders" : "host",
+//	  "x-amz-expires" : "<duration in seconds>",
+//	  "x-amz-signature" : "<signature>"
+//	}
+func (m *Mechanism) Start(ctx context.Context) (sess sasl.StateMachine, ir []byte, err error) {
+	signedMap, err := m.preSign(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	signedJson, err := json.Marshal(signedMap)
+	return m, signedJson, err
+}
+
+// preSign produces the authentication values required for AWS_MSK_IAM.
+func (m *Mechanism) preSign(ctx context.Context) (map[string]string, error) {
+	req, err := buildReq(ctx, defaultExpiry(m.Expiry))
+	if err != nil {
+		return nil, err
+	}
+
+	creds, err := m.Credentials.Retrieve(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	signedUrl, header, err := m.Signer.PresignHTTP(ctx, creds, req, signPayload, signService, m.Region, defaultSignTime(m.SignTime))
+	if err != nil {
+		return nil, err
+	}
+
+	u, err := url.Parse(signedUrl)
+	if err != nil {
+		return nil, err
+	}
+	return buildSignedMap(u, header), nil
+}
+
+// buildReq builds the http.Request for AWS pre-signing.
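+//
+// For illustration, with a hypothetical broker host and the default 5 minute
+// expiry, the request that gets pre-signed looks like:
+//
+//	GET kafka://b-1.msk.example.com:9098/?Action=kafka-cluster%3AConnect&X-Amz-Expires=300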
+func buildReq(ctx context.Context, expiry time.Duration) (*http.Request, error) {
+	query := url.Values{
+		queryActionKey: {signAction},
+		queryExpiryKey: {strconv.FormatInt(int64(expiry/time.Second), 10)},
+	}
+	saslMeta := sasl.MetadataFromContext(ctx)
+	if saslMeta == nil {
+		return nil, errors.New("missing sasl metadata")
+	}
+
+	signUrl := url.URL{
+		Scheme:   "kafka",
+		Host:     saslMeta.Host,
+		Path:     "/",
+		RawQuery: query.Encode(),
+	}
+
+	req, err := http.NewRequest(http.MethodGet, signUrl.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return req, nil
+}
+
+// buildSignedMap builds the signed string map which will be used to
+// authenticate with MSK.
+func buildSignedMap(u *url.URL, header http.Header) map[string]string {
+	signedMap := map[string]string{
+		signVersionKey:   signVersion,
+		signHostKey:      u.Host,
+		signUserAgentKey: signUserAgent,
+		signActionKey:    signAction,
+	}
+	// The protocol requires lowercase keys.
+	for key, vals := range header {
+		signedMap[strings.ToLower(key)] = vals[0]
+	}
+	for key, vals := range u.Query() {
+		signedMap[strings.ToLower(key)] = vals[0]
+	}
+
+	return signedMap
+}
+
+// defaultExpiry sets the default expiration time if the user doesn't define
+// Mechanism.Expiry.
+func defaultExpiry(v time.Duration) time.Duration {
+	if v == 0 {
+		return 5 * time.Minute
+	}
+	return v
+}
+
+// defaultSignTime sets the default sign time if the user doesn't define
+// Mechanism.SignTime.
+func defaultSignTime(v time.Time) time.Time {
+	if v.IsZero() {
+		return time.Now()
+	}
+	return v
+}
+
+// NewMechanism provides a Mechanism that signs requests with the credentials
+// and region of the given aws.Config.
+func NewMechanism(awsCfg aws.Config) *Mechanism {
+	return &Mechanism{
+		Signer:      signer.NewSigner(),
+		Credentials: awsCfg.Credentials,
+		Region:      awsCfg.Region,
+	}
+}
diff --git a/vendor/github.com/segmentio/kafka-go/sasl/plain/plain.go b/vendor/github.com/segmentio/kafka-go/sasl/plain/plain.go
new file mode 100644
index 00000000000..10c7632d238
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/sasl/plain/plain.go
@@ -0,0 +1,30 @@
+package plain
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/segmentio/kafka-go/sasl"
+)
+
+// Mechanism implements the PLAIN mechanism and passes the credentials in clear
+// text.
+type Mechanism struct {
+	Username string
+	Password string
+}
+
+func (Mechanism) Name() string {
+	return "PLAIN"
+}
+
+func (m Mechanism) Start(ctx context.Context) (sasl.StateMachine, []byte, error) {
+	// Mechanism is stateless, so it can also implement sasl.StateMachine
+	return m, []byte(fmt.Sprintf("\x00%s\x00%s", m.Username, m.Password)), nil
+}
+
+func (m Mechanism) Next(ctx context.Context, challenge []byte) (bool, []byte, error) {
+	// kafka will return an error if it rejected the credentials, so we'd only
+	// arrive here on success.
+	return true, nil, nil
+}
diff --git a/vendor/github.com/segmentio/kafka-go/sasl/sasl.go b/vendor/github.com/segmentio/kafka-go/sasl/sasl.go
new file mode 100644
index 00000000000..4056d1f3c2a
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/sasl/sasl.go
@@ -0,0 +1,65 @@
+package sasl
+
+import "context"
+
+type ctxKey struct{}
+
+// Mechanism implements the SASL state machine for a particular mode of
+// authentication. It is used by the kafka.Dialer to perform the SASL
+// handshake.
+//
+// A Mechanism must be re-usable and safe for concurrent access by multiple
+// goroutines.
+type Mechanism interface {
+	// Name returns the identifier for this SASL mechanism. This string will be
+	// passed to the SASL handshake request and must match one of the mechanisms
+	// supported by Kafka.
+	Name() string
+
+	// Start begins SASL authentication. It returns an authentication state
+	// machine and "initial response" data (if required by the selected
+	// mechanism). A non-nil error causes the client to abort the authentication
+	// attempt.
+	//
+	// A nil ir value is different from a zero-length value. The nil value
+	// indicates that the selected mechanism does not use an initial response,
+	// while a zero-length value indicates an empty initial response, which must
+	// be sent to the server.
+	Start(ctx context.Context) (sess StateMachine, ir []byte, err error)
+}
+
+// StateMachine implements the SASL challenge/response flow for a single SASL
+// handshake. A StateMachine will be created by the Mechanism per connection,
+// so it does not need to be safe for concurrent access by multiple goroutines.
+//
+// Once the StateMachine is created by the Mechanism, the caller loops by
+// passing the server's response into Next and then sending Next's returned
+// bytes to the server. Eventually either Next will indicate that the
+// authentication has been successfully completed via the done return value, or
+// it will indicate that the authentication failed by returning a non-nil error.
+type StateMachine interface {
+	// Next continues challenge-response authentication. A non-nil error
+	// indicates that the client should abort the authentication attempt. If
+	// the client has been successfully authenticated, then the done return
+	// value will be true.
+	Next(ctx context.Context, challenge []byte) (done bool, response []byte, err error)
+}
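+
+// As a sketch, the caller side of the loop described above looks roughly like
+// the following (the transport plumbing is hypothetical):
+//
+//	sess, response, err := mechanism.Start(ctx)
+//	for done := false; !done && err == nil; {
+//		challenge := roundTrip(response) // send to the broker, read its reply
+//		done, response, err = sess.Next(ctx, challenge)
+//	}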
+
+// Metadata contains additional data for performing SASL authentication.
+type Metadata struct {
+	// Host is the address of the broker the authentication will be
+	// performed on.
+	Host string
+	Port int
+}
+
+// WithMetadata returns a copy of the context with associated Metadata.
+func WithMetadata(ctx context.Context, m *Metadata) context.Context {
+	return context.WithValue(ctx, ctxKey{}, m)
+}
+
+// MetadataFromContext retrieves the Metadata from the context.
+func MetadataFromContext(ctx context.Context) *Metadata {
+	m, _ := ctx.Value(ctxKey{}).(*Metadata)
+	return m
+}
diff --git a/vendor/github.com/segmentio/kafka-go/sasl/scram/scram.go b/vendor/github.com/segmentio/kafka-go/sasl/scram/scram.go
new file mode 100644
index 00000000000..b29885f32d8
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/sasl/scram/scram.go
@@ -0,0 +1,91 @@
+package scram
+
+import (
+	"context"
+	"crypto/sha256"
+	"crypto/sha512"
+	"hash"
+
+	"github.com/segmentio/kafka-go/sasl"
+	"github.com/xdg-go/scram"
+)
+
+// Algorithm determines the hash function used by SCRAM to protect the user's
+// credentials.
+type Algorithm interface {
+	// Name returns the algorithm's name, e.g. "SCRAM-SHA-256"
+	Name() string
+
+	// Hash returns a new hash.Hash.
+	Hash() hash.Hash
+}
+
+type sha256Algo struct{}
+
+func (sha256Algo) Name() string {
+	return "SCRAM-SHA-256"
+}
+
+func (sha256Algo) Hash() hash.Hash {
+	return sha256.New()
+}
+
+type sha512Algo struct{}
+
+func (sha512Algo) Name() string {
+	return "SCRAM-SHA-512"
+}
+
+func (sha512Algo) Hash() hash.Hash {
+	return sha512.New()
+}
+
+var (
+	SHA256 Algorithm = sha256Algo{}
+	SHA512 Algorithm = sha512Algo{}
+)
+
+type mechanism struct {
+	algo   Algorithm
+	client *scram.Client
+}
+
+type session struct {
+	convo *scram.ClientConversation
+}
+
+// Mechanism returns a new sasl.Mechanism that will use SCRAM with the provided
+// Algorithm to securely transmit the provided credentials to Kafka.
+//
+// SCRAM-SHA-256 and SCRAM-SHA-512 were added to Kafka in 0.10.2.0. These
+// mechanisms will not work with older versions.
+func Mechanism(algo Algorithm, username, password string) (sasl.Mechanism, error) {
+	hashGen := scram.HashGeneratorFcn(algo.Hash)
+	client, err := hashGen.NewClient(username, password, "")
+	if err != nil {
+		return nil, err
+	}
+
+	return &mechanism{
+		algo:   algo,
+		client: client,
+	}, nil
+}
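+
+// A usage sketch (credentials are hypothetical); the returned sasl.Mechanism
+// can then be plugged into a Dialer or Transport:
+//
+//	mechanism, err := scram.Mechanism(scram.SHA512, "username", "password")
+//	if err != nil {
+//		panic(err)
+//	}
+//	dialer := &kafka.Dialer{SASLMechanism: mechanism}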
+
+func (m *mechanism) Name() string {
+	return m.algo.Name()
+}
+
+func (m *mechanism) Start(ctx context.Context) (sasl.StateMachine, []byte, error) {
+	convo := m.client.NewConversation()
+	str, err := convo.Step("")
+	if err != nil {
+		return nil, nil, err
+	}
+	return &session{convo: convo}, []byte(str), nil
+}
+
+func (s *session) Next(ctx context.Context, challenge []byte) (bool, []byte, error) {
+	str, err := s.convo.Step(string(challenge))
+	return s.convo.Done(), []byte(str), err
+}
diff --git a/vendor/github.com/segmentio/kafka-go/saslauthenticate.go b/vendor/github.com/segmentio/kafka-go/saslauthenticate.go
new file mode 100644
index 00000000000..ad129291891
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/saslauthenticate.go
@@ -0,0 +1,54 @@
+package kafka
+
+import (
+	"bufio"
+)
+
+type saslAuthenticateRequestV0 struct {
+	// Data holds the SASL payload
+	Data []byte
+}
+
+func (t saslAuthenticateRequestV0) size() int32 {
+	return sizeofBytes(t.Data)
+}
+
+func (t *saslAuthenticateRequestV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
+	return readBytes(r, sz, &t.Data)
+}
+
+func (t saslAuthenticateRequestV0) writeTo(wb *writeBuffer) {
+	wb.writeBytes(t.Data)
+}
+
+type saslAuthenticateResponseV0 struct {
+	// ErrorCode holds response error code
+	ErrorCode int16
+
+	ErrorMessage string
+
+	Data []byte
+}
+
+func (t saslAuthenticateResponseV0) size() int32 {
+	return sizeofInt16(t.ErrorCode) + sizeofString(t.ErrorMessage) + sizeofBytes(t.Data)
+}
+
+func (t saslAuthenticateResponseV0) writeTo(wb *writeBuffer) {
+	wb.writeInt16(t.ErrorCode)
+	wb.writeString(t.ErrorMessage)
+	wb.writeBytes(t.Data)
+}
+
+func (t *saslAuthenticateResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
+	if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil {
+		return
+	}
+	if remain, err = readString(r, remain, &t.ErrorMessage); err != nil {
+		return
+	}
+	if remain, err = readBytes(r, remain, &t.Data); err != nil {
+		return
+	}
+	return
+}
diff --git a/vendor/github.com/segmentio/kafka-go/saslhandshake.go b/vendor/github.com/segmentio/kafka-go/saslhandshake.go
new file mode 100644
index 00000000000..3ffaee0f949
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/saslhandshake.go
@@ -0,0 +1,53 @@
+package kafka
+
+import (
+	"bufio"
+)
+
+// saslHandshakeRequestV0 implements the format for V0 and V1 SASL
+// requests (they are identical).
+type saslHandshakeRequestV0 struct {
+	// Mechanism holds the SASL Mechanism chosen by the client.
+	Mechanism string
+}
+
+func (t saslHandshakeRequestV0) size() int32 {
+	return sizeofString(t.Mechanism)
+}
+
+func (t *saslHandshakeRequestV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
+	return readString(r, sz, &t.Mechanism)
+}
+
+func (t saslHandshakeRequestV0) writeTo(wb *writeBuffer) {
+	wb.writeString(t.Mechanism)
+}
+
+// saslHandshakeResponseV0 implements the format for V0 and V1 SASL
+// responses (they are identical).
+type saslHandshakeResponseV0 struct {
+	// ErrorCode holds response error code
+	ErrorCode int16
+
+	// Array of mechanisms enabled in the server
+	EnabledMechanisms []string
+}
+
+func (t saslHandshakeResponseV0) size() int32 {
+	return sizeofInt16(t.ErrorCode) + sizeofStringArray(t.EnabledMechanisms)
+}
+
+func (t saslHandshakeResponseV0) writeTo(wb *writeBuffer) {
+	wb.writeInt16(t.ErrorCode)
+	wb.writeStringArray(t.EnabledMechanisms)
+}
+
+func (t *saslHandshakeResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
+	if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil {
+		return
+	}
+	if remain, err = readStringArray(r, remain, &t.EnabledMechanisms); err != nil {
+		return
+	}
+	return
+}
diff --git a/vendor/github.com/segmentio/kafka-go/sizeof.go b/vendor/github.com/segmentio/kafka-go/sizeof.go
new file mode 100644
index 00000000000..48ab469d7ac
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/sizeof.go
@@ -0,0 +1,72 @@
+package kafka
+
+import "fmt"
+
+type sizable interface {
+	size() int32
+}
+
+func sizeof(a interface{}) int32 {
+	switch v := a.(type) {
+	case int8:
+		return 1
+	case int16:
+		return 2
+	case int32:
+		return 4
+	case int64:
+		return 8
+	case string:
+		return sizeofString(v)
+	case bool:
+		return 1
+	case []byte:
+		return sizeofBytes(v)
+	case sizable:
+		return v.size()
+	}
+	panic(fmt.Sprintf("unsupported type: %T", a))
+}
+
+func sizeofInt16(_ int16) int32 {
+	return 2
+}
+
+func sizeofInt32(_ int32) int32 {
+	return 4
+}
+
+func sizeofInt64(_ int64) int32 {
+	return 8
+}
+
+func sizeofString(s string) int32 {
+	return 2 + int32(len(s))
+}
+
+func sizeofNullableString(s *string) int32 {
+	if s == nil {
+		return 2
+	}
+	return sizeofString(*s)
+}
+
+func sizeofBytes(b []byte) int32 {
+	return 4 + int32(len(b))
+}
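+
+// For example, the protocol prefixes strings with a 2 byte length and byte
+// arrays with a 4 byte length, so:
+//
+//	sizeofString("foo")          // == 5
+//	sizeofBytes([]byte{1, 2, 3}) // == 7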
+
+func sizeofArray(n int, f func(int) int32) int32 {
+	s := int32(4)
+	for i := 0; i != n; i++ {
+		s += f(i)
+	}
+	return s
+}
+
+func sizeofInt32Array(a []int32) int32 {
+	return 4 + (4 * int32(len(a)))
+}
+
+func sizeofStringArray(a []string) int32 {
+	return sizeofArray(len(a), func(i int) int32 { return sizeofString(a[i]) })
+}
diff --git a/vendor/github.com/segmentio/kafka-go/stats.go b/vendor/github.com/segmentio/kafka-go/stats.go
new file mode 100644
index 00000000000..ef1e582cb3a
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/stats.go
@@ -0,0 +1,189 @@
+package kafka
+
+import (
+	"sync/atomic"
+	"time"
+)
+
+// SummaryStats is a data structure that carries a summary of observed values.
+type SummaryStats struct {
+	Avg   int64 `metric:"avg" type:"gauge"`
+	Min   int64 `metric:"min" type:"gauge"`
+	Max   int64 `metric:"max" type:"gauge"`
+	Count int64 `metric:"count" type:"counter"`
+	Sum   int64 `metric:"sum" type:"counter"`
+}
+
+// DurationStats is a data structure that carries a summary of observed duration values.
+type DurationStats struct {
+	Avg   time.Duration `metric:"avg" type:"gauge"`
+	Min   time.Duration `metric:"min" type:"gauge"`
+	Max   time.Duration `metric:"max" type:"gauge"`
+	Count int64         `metric:"count" type:"counter"`
+	Sum   time.Duration `metric:"sum" type:"counter"`
+}
+
+// counter is an atomic incrementing counter which gets reset on snapshot.
+//
+// Since atomic is used to mutate the statistic the value must be 64-bit aligned.
+// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+type counter int64
+
+func (c *counter) ptr() *int64 {
+	return (*int64)(c)
+}
+
+func (c *counter) observe(v int64) {
+	atomic.AddInt64(c.ptr(), v)
+}
+
+func (c *counter) snapshot() int64 {
+	return atomic.SwapInt64(c.ptr(), 0)
+}
+
+// gauge is an atomic integer that may be set to any arbitrary value, the value
+// does not change after a snapshot.
+//
+// Since atomic is used to mutate the statistic the value must be 64-bit aligned.
+// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+type gauge int64
+
+func (g *gauge) ptr() *int64 {
+	return (*int64)(g)
+}
+
+func (g *gauge) observe(v int64) {
+	atomic.StoreInt64(g.ptr(), v)
+}
+
+func (g *gauge) snapshot() int64 {
+	return atomic.LoadInt64(g.ptr())
+}
+
+// minimum is an atomic integral type that keeps track of the minimum of all
+// values that it observed between snapshots.
+//
+// Since atomic is used to mutate the statistic the value must be 64-bit aligned.
+// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+type minimum int64
+
+func (m *minimum) ptr() *int64 {
+	return (*int64)(m)
+}
+
+func (m *minimum) observe(v int64) {
+	for {
+		ptr := m.ptr()
+		min := atomic.LoadInt64(ptr)
+
+		if min >= 0 && min <= v {
+			break
+		}
+
+		if atomic.CompareAndSwapInt64(ptr, min, v) {
+			break
+		}
+	}
+}
+
+func (m *minimum) snapshot() int64 {
+	p := m.ptr()
+	v := atomic.LoadInt64(p)
+	atomic.CompareAndSwapInt64(p, v, -1)
+	if v < 0 {
+		v = 0
+	}
+	return v
+}
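+
+// For example, after observe(3) and observe(7) a call to snapshot returns 3
+// and resets the tracked value to the -1 sentinel, which reads back as 0 if
+// nothing else is observed before the next snapshot.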
+
+// maximum is an atomic integral type that keeps track of the maximum of all
+// values that it observed between snapshots.
+//
+// Since atomic is used to mutate the statistic the value must be 64-bit aligned.
+// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+type maximum int64
+
+func (m *maximum) ptr() *int64 {
+	return (*int64)(m)
+}
+
+func (m *maximum) observe(v int64) {
+	for {
+		ptr := m.ptr()
+		max := atomic.LoadInt64(ptr)
+
+		if max >= 0 && max >= v {
+			break
+		}
+
+		if atomic.CompareAndSwapInt64(ptr, max, v) {
+			break
+		}
+	}
+}
+
+func (m *maximum) snapshot() int64 {
+	p := m.ptr()
+	v := atomic.LoadInt64(p)
+	atomic.CompareAndSwapInt64(p, v, -1)
+	if v < 0 {
+		v = 0
+	}
+	return v
+}
+
+type summary struct {
+	min   minimum
+	max   maximum
+	sum   counter
+	count counter
+}
+
+func makeSummary() summary {
+	return summary{
+		min: -1,
+		max: -1,
+	}
+}
+
+func (s *summary) observe(v int64) {
+	s.min.observe(v)
+	s.max.observe(v)
+	s.sum.observe(v)
+	s.count.observe(1)
+}
+
+func (s *summary) observeDuration(v time.Duration) {
+	s.observe(int64(v))
+}
+
+func (s *summary) snapshot() SummaryStats {
+	avg := int64(0)
+	min := s.min.snapshot()
+	max := s.max.snapshot()
+	sum := s.sum.snapshot()
+	count := s.count.snapshot()
+
+	if count != 0 {
+		avg = int64(float64(sum) / float64(count))
+	}
+
+	return SummaryStats{
+		Avg:   avg,
+		Min:   min,
+		Max:   max,
+		Count: count,
+		Sum:   sum,
+	}
+}
+
+func (s *summary) snapshotDuration() DurationStats {
+	summary := s.snapshot()
+	return DurationStats{
+		Avg:   time.Duration(summary.Avg),
+		Min:   time.Duration(summary.Min),
+		Max:   time.Duration(summary.Max),
+		Count: summary.Count,
+		Sum:   time.Duration(summary.Sum),
+	}
+}
diff --git a/vendor/github.com/segmentio/kafka-go/syncgroup.go b/vendor/github.com/segmentio/kafka-go/syncgroup.go
new file mode 100644
index 00000000000..ff37569e7cd
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/syncgroup.go
@@ -0,0 +1,288 @@
+package kafka
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+	"github.com/segmentio/kafka-go/protocol/consumer"
+	"github.com/segmentio/kafka-go/protocol/syncgroup"
+)
+
+// SyncGroupRequest is the request structure for the SyncGroup function.
+type SyncGroupRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// GroupID of the group to sync.
+	GroupID string
+
+	// The generation of the group.
+	GenerationID int
+
+	// The member ID assigned by the group.
+	MemberID string
+
+	// The unique identifier for the consumer instance.
+	GroupInstanceID string
+
+	// The name for the class of protocols implemented by the group being joined.
+	ProtocolType string
+
+	// The group protocol name.
+	ProtocolName string
+
+	// The group member assignments.
+	Assignments []SyncGroupRequestAssignment
+}
+
+// SyncGroupRequestAssignment represents an assignment for a group member.
+type SyncGroupRequestAssignment struct {
+	// The ID of the member to assign.
+	MemberID string
+
+	// The member assignment.
+	Assignment GroupProtocolAssignment
+}
+
+// SyncGroupResponse is the response structure for the SyncGroup function.
+type SyncGroupResponse struct {
+	// An error that may have occurred when attempting to sync the group.
+	//
+	// The errors contain the kafka error code. Programs may use the standard
+	// errors.Is function to test the error against kafka error codes.
+	Error error
+
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// The group protocol type.
+	ProtocolType string
+
+	// The group protocol name.
+	ProtocolName string
+
+	// The member assignment.
+	Assignment GroupProtocolAssignment
+}
+
+// GroupProtocolAssignment represents an assignment of topics and partitions for a group member.
+type GroupProtocolAssignment struct {
+	// The topics and partitions assigned to the group member.
+	AssignedPartitions map[string][]int
+
+	// UserData for the assignment.
+	UserData []byte
+}
+
+// SyncGroup sends a sync group request to the coordinator and returns the response.
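+//
+// A usage sketch (the group coordinates are hypothetical):
+//
+//	res, err := client.SyncGroup(ctx, &kafka.SyncGroupRequest{
+//		Addr:         client.Addr,
+//		GroupID:      "my-group",
+//		GenerationID: 1,
+//		MemberID:     "member-1",
+//		ProtocolType: "consumer",
+//		ProtocolName: "range",
+//	})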
+func (c *Client) SyncGroup(ctx context.Context, req *SyncGroupRequest) (*SyncGroupResponse, error) {
+	syncGroup := syncgroup.Request{
+		GroupID:         req.GroupID,
+		GenerationID:    int32(req.GenerationID),
+		MemberID:        req.MemberID,
+		GroupInstanceID: req.GroupInstanceID,
+		ProtocolType:    req.ProtocolType,
+		ProtocolName:    req.ProtocolName,
+		Assignments:     make([]syncgroup.RequestAssignment, 0, len(req.Assignments)),
+	}
+
+	for _, assignment := range req.Assignments {
+		assign := consumer.Assignment{
+			Version:            consumer.MaxVersionSupported,
+			AssignedPartitions: make([]consumer.TopicPartition, 0, len(assignment.Assignment.AssignedPartitions)),
+			UserData:           assignment.Assignment.UserData,
+		}
+
+		for topic, partitions := range assignment.Assignment.AssignedPartitions {
+			tp := consumer.TopicPartition{
+				Topic:      topic,
+				Partitions: make([]int32, 0, len(partitions)),
+			}
+			for _, partition := range partitions {
+				tp.Partitions = append(tp.Partitions, int32(partition))
+			}
+			assign.AssignedPartitions = append(assign.AssignedPartitions, tp)
+		}
+
+		assignBytes, err := protocol.Marshal(consumer.MaxVersionSupported, assign)
+		if err != nil {
+			return nil, fmt.Errorf("kafka.(*Client).SyncGroup: %w", err)
+		}
+
+		syncGroup.Assignments = append(syncGroup.Assignments, syncgroup.RequestAssignment{
+			MemberID:   assignment.MemberID,
+			Assignment: assignBytes,
+		})
+	}
+
+	m, err := c.roundTrip(ctx, req.Addr, &syncGroup)
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).SyncGroup: %w", err)
+	}
+
+	r := m.(*syncgroup.Response)
+
+	var assignment consumer.Assignment
+	err = protocol.Unmarshal(r.Assignments, consumer.MaxVersionSupported, &assignment)
+	if err != nil {
+		return nil, fmt.Errorf("kafka.(*Client).SyncGroup: %w", err)
+	}
+
+	res := &SyncGroupResponse{
+		Throttle:     makeDuration(r.ThrottleTimeMS),
+		Error:        makeError(r.ErrorCode, ""),
+		ProtocolType: r.ProtocolType,
+		ProtocolName: r.ProtocolName,
+		Assignment: GroupProtocolAssignment{
+			AssignedPartitions: make(map[string][]int, len(assignment.AssignedPartitions)),
+			UserData:           assignment.UserData,
+		},
+	}
+	partitions := map[string][]int{}
+	for _, topicPartition := range assignment.AssignedPartitions {
+		for _, partition := range topicPartition.Partitions {
+			partitions[topicPartition.Topic] = append(partitions[topicPartition.Topic], int(partition))
+		}
+	}
+	res.Assignment.AssignedPartitions = partitions
+
+	return res, nil
+}
+
+type groupAssignment struct {
+	Version  int16
+	Topics   map[string][]int32
+	UserData []byte
+}
+
+func (t groupAssignment) size() int32 {
+	sz := sizeofInt16(t.Version) + sizeofInt16(int16(len(t.Topics)))
+
+	for topic, partitions := range t.Topics {
+		sz += sizeofString(topic) + sizeofInt32Array(partitions)
+	}
+
+	return sz + sizeofBytes(t.UserData)
+}
+
+func (t groupAssignment) writeTo(wb *writeBuffer) {
+	wb.writeInt16(t.Version)
+	wb.writeInt32(int32(len(t.Topics)))
+
+	for topic, partitions := range t.Topics {
+		wb.writeString(topic)
+		wb.writeInt32Array(partitions)
+	}
+
+	wb.writeBytes(t.UserData)
+}
+
+func (t *groupAssignment) readFrom(r *bufio.Reader, size int) (remain int, err error) {
+	// I came across this case when testing for compatibility with bsm/sarama-cluster. It
+	// appears that in some cases sarama-cluster can send a nil array entry. Admittedly, I
+	// didn't look too closely at it.
+	if size == 0 {
+		t.Topics = map[string][]int32{}
+		return 0, nil
+	}
+
+	if remain, err = readInt16(r, size, &t.Version); err != nil {
+		return
+	}
+	if remain, err = readMapStringInt32(r, remain, &t.Topics); err != nil {
+		return
+	}
+	if remain, err = readBytes(r, remain, &t.UserData); err != nil {
+		return
+	}
+
+	return
+}
+
+func (t groupAssignment) bytes() []byte {
+	buf := bytes.NewBuffer(nil)
+	t.writeTo(&writeBuffer{w: buf})
+	return buf.Bytes()
+}
+
+type syncGroupRequestGroupAssignmentV0 struct {
+	// MemberID assigned by the group coordinator
+	MemberID string
+
+	// MemberAssignments holds client encoded assignments
+	//
+	// See consumer groups section of https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
+	MemberAssignments []byte
+}
+
+func (t syncGroupRequestGroupAssignmentV0) size() int32 {
+	return sizeofString(t.MemberID) +
+		sizeofBytes(t.MemberAssignments)
+}
+
+func (t syncGroupRequestGroupAssignmentV0) writeTo(wb *writeBuffer) {
+	wb.writeString(t.MemberID)
+	wb.writeBytes(t.MemberAssignments)
+}
+
+type syncGroupRequestV0 struct {
+	// GroupID holds the unique group identifier
+	GroupID string
+
+	// GenerationID holds the generation of the group.
+	GenerationID int32
+
+	// MemberID assigned by the group coordinator
+	MemberID string
+
+	GroupAssignments []syncGroupRequestGroupAssignmentV0
+}
+
+func (t syncGroupRequestV0) size() int32 {
+	return sizeofString(t.GroupID) +
+		sizeofInt32(t.GenerationID) +
+		sizeofString(t.MemberID) +
+		sizeofArray(len(t.GroupAssignments), func(i int) int32 { return t.GroupAssignments[i].size() })
+}
+
+func (t syncGroupRequestV0) writeTo(wb *writeBuffer) {
+	wb.writeString(t.GroupID)
+	wb.writeInt32(t.GenerationID)
+	wb.writeString(t.MemberID)
+	wb.writeArray(len(t.GroupAssignments), func(i int) { t.GroupAssignments[i].writeTo(wb) })
+}
+
+type syncGroupResponseV0 struct {
+	// ErrorCode holds response error code
+	ErrorCode int16
+
+	// MemberAssignments holds client encoded assignments
+	//
+	// See consumer groups section of https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
+	MemberAssignments []byte
+}
+
+func (t syncGroupResponseV0) size() int32 {
+	return sizeofInt16(t.ErrorCode) +
+		sizeofBytes(t.MemberAssignments)
+}
+
+func (t syncGroupResponseV0) writeTo(wb *writeBuffer) {
+	wb.writeInt16(t.ErrorCode)
+	wb.writeBytes(t.MemberAssignments)
+}
+
+func (t *syncGroupResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) {
+	if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil {
+		return
+	}
+	if remain, err = readBytes(r, remain, &t.MemberAssignments); err != nil {
+		return
+	}
+	return
+}
diff --git a/vendor/github.com/segmentio/kafka-go/time.go b/vendor/github.com/segmentio/kafka-go/time.go
new file mode 100644
index 00000000000..544d84207f0
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/time.go
@@ -0,0 +1,58 @@
+package kafka
+
+import (
+	"math"
+	"time"
+)
+
+const (
+	maxTimeout = time.Duration(math.MaxInt32) * time.Millisecond
+	minTimeout = time.Duration(math.MinInt32) * time.Millisecond
+	defaultRTT = 1 * time.Second
+)
+
+func makeTime(t int64) time.Time {
+	if t <= 0 {
+		return time.Time{}
+	}
+	return time.Unix(t/1000, (t%1000)*int64(time.Millisecond)).UTC()
+}
+
+func timestamp(t time.Time) int64 {
+	if t.IsZero() {
+		return 0
+	}
+	return t.UnixNano() / int64(time.Millisecond)
+}
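+
+// For example (times are illustrative): makeTime(1500) is
+// 1970-01-01T00:00:01.5Z, timestamp of that value is 1500 again, and the zero
+// time maps to 0 in both directions.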
+
+func makeDuration(ms int32) time.Duration {
+	return time.Duration(ms) * time.Millisecond
+}
+
+func milliseconds(d time.Duration) int32 {
+	switch {
+	case d > maxTimeout:
+		d = maxTimeout
+	case d < minTimeout:
+		d = minTimeout
+	}
+	return int32(d / time.Millisecond)
+}
+
+func deadlineToTimeout(deadline time.Time, now time.Time) time.Duration {
+	if deadline.IsZero() {
+		return maxTimeout
+	}
+	return deadline.Sub(now)
+}
+
+func adjustDeadlineForRTT(deadline time.Time, now time.Time, rtt time.Duration) time.Time {
+	if !deadline.IsZero() {
+		timeout := deadline.Sub(now)
+		if timeout < rtt {
+			rtt = timeout / 4
+		}
+		deadline = deadline.Add(-rtt)
+	}
+	return deadline
+}
diff --git a/vendor/github.com/segmentio/kafka-go/transport.go b/vendor/github.com/segmentio/kafka-go/transport.go
new file mode 100644
index 00000000000..685bdddb18d
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/transport.go
@@ -0,0 +1,1363 @@
+package kafka
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"net"
+	"runtime/pprof"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol"
+	"github.com/segmentio/kafka-go/protocol/apiversions"
+	"github.com/segmentio/kafka-go/protocol/createtopics"
+	"github.com/segmentio/kafka-go/protocol/findcoordinator"
+	meta "github.com/segmentio/kafka-go/protocol/metadata"
+	"github.com/segmentio/kafka-go/protocol/saslauthenticate"
+	"github.com/segmentio/kafka-go/protocol/saslhandshake"
+	"github.com/segmentio/kafka-go/sasl"
+)
+
+// Request is an interface implemented by types that represent messages sent
+// from kafka clients to brokers.
+type Request = protocol.Message
+
+// Response is an interface implemented by types that represent messages sent
+// from kafka brokers in response to client requests.
+type Response = protocol.Message
+
+// RoundTripper is an interface implemented by types which support interacting
+// with kafka brokers.
+type RoundTripper interface {
+	// RoundTrip sends a request to a kafka broker and returns the response that
+	// was received, or a non-nil error.
+	//
+	// The context passed as first argument can be used to asynchronously abort
+	// the call if needed.
+	RoundTrip(context.Context, net.Addr, Request) (Response, error)
+}
+
+// Transport is an implementation of the RoundTripper interface.
+//
+// Transport values manage a pool of connections and automatically discover the
+// cluster's layout to route requests to the appropriate brokers.
+//
+// Transport values are safe to use concurrently from multiple goroutines.
+//
+// Note: The intent is for the Transport to become the underlying layer of the
+// kafka.Reader and kafka.Writer types.
+type Transport struct {
+	// A function used to establish connections to the kafka cluster.
+	Dial func(context.Context, string, string) (net.Conn, error)
+
+	// Time limit set for establishing connections to the kafka cluster. This
+	// limit includes all round trips done to establish the connections (TLS
+	// handshake, SASL negotiation, etc...).
+	//
+	// Defaults to 5s.
+	DialTimeout time.Duration
+
+	// Maximum amount of time that connections will remain open and unused.
+	// The transport automatically closes connections that have been idle for
+	// too long, and re-opens them on demand when the transport is used again.
+	//
+	// Defaults to 30s.
+	IdleTimeout time.Duration
+
+	// TTL for the metadata cached by this transport. Note that the value
+	// configured here is an upper bound, the transport randomizes the TTLs to
+	// avoid getting into states where multiple clients end up synchronized and
+	// cause bursts of requests to the kafka broker.
+	//
+	// Defaults to 6s.
+	MetadataTTL time.Duration
+
+	// Topic names for the metadata cached by this transport. If this field is left blank,
+	// metadata information of all topics in the cluster will be retrieved.
+	MetadataTopics []string
+
+	// Unique identifier that the transport communicates to the brokers when it
+	// sends requests.
+	ClientID string
+
+	// An optional configuration for TLS connections established by this
+	// transport.
+	//
+	// If the ServerName is left blank, it defaults to the host name of the
+	// broker that the connection is established to.
+	TLS *tls.Config
+
+	// SASL configures the Transport to use SASL authentication.
+	SASL sasl.Mechanism
+
+	// An optional resolver used to translate broker host names into network
+	// addresses.
+	//
+	// The resolver will be called for every request (not every connection),
+	// making it possible to implement ACL policies by validating that the
+	// program is allowed to connect to the kafka broker. This also means that
+	// the resolver should probably provide a caching layer to avoid storming
+	// the service discovery backend with requests.
+	//
+	// When set, the Dial function is not responsible for performing name
+	// resolution, and is always called with a pre-resolved address.
+	Resolver BrokerResolver
+
+	// The background context used to control goroutines started internally by
+	// the transport.
+	//
+	// If nil, context.Background() is used instead.
+	Context context.Context
+
+	mutex sync.RWMutex
+	pools map[networkAddress]*connPool
+}
+
+// DefaultTransport is the default transport used by kafka clients in this
+// package.
+var DefaultTransport RoundTripper = &Transport{
+	Dial: (&net.Dialer{
+		Timeout:   3 * time.Second,
+		DualStack: true,
+	}).DialContext,
+}
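+
+// A configuration sketch (field values are illustrative, not recommendations;
+// mechanism is any sasl.Mechanism created elsewhere):
+//
+//	transport := &Transport{
+//		DialTimeout: 5 * time.Second,
+//		IdleTimeout: 30 * time.Second,
+//		TLS:         &tls.Config{},
+//		SASL:        mechanism,
+//	}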
+
+// CloseIdleConnections closes all idle connections immediately, and marks all
+// connections that are in use to be closed when they become idle again.
+func (t *Transport) CloseIdleConnections() {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+
+	for _, pool := range t.pools {
+		pool.unref()
+	}
+
+	for k := range t.pools {
+		delete(t.pools, k)
+	}
+}
+
+// RoundTrip sends a request to a kafka cluster and returns the response, or an
+// error if no responses were received.
+//
+// Message types are available in sub-packages of the protocol package. Each
+// kafka API is implemented in a different sub-package. For example, the request
+// and response types for the Fetch API are available in the protocol/fetch
+// package.
+//
+// The type of the response message will match the type of the request. For
+// example, if RoundTrip was called with a *fetch.Request as argument, the value
+// returned will be of type *fetch.Response. It is safe for the program to do a
+// type assertion after checking that no error was returned.
+//
+// This example illustrates the way this method is expected to be used:
+//
+//	r, err := transport.RoundTrip(ctx, addr, &fetch.Request{ ... })
+//	if err != nil {
+//		...
+//	} else {
+//		res := r.(*fetch.Response)
+//		...
+//	}
+//
+// The transport automatically selects the highest version of the API that is
+// supported by both the kafka-go package and the kafka broker. The negotiation
+// happens transparently once when connections are established.
+//
+// This API was introduced in version 0.4 as a way to leverage the lower-level
+// features of the kafka protocol, but also provide a more efficient way of
+// managing connections to kafka brokers.
+func (t *Transport) RoundTrip(ctx context.Context, addr net.Addr, req Request) (Response, error) {
+	p := t.grabPool(addr)
+	defer p.unref()
+	return p.roundTrip(ctx, req)
+}
+
+func (t *Transport) dial() func(context.Context, string, string) (net.Conn, error) {
+	if t.Dial != nil {
+		return t.Dial
+	}
+	return defaultDialer.DialContext
+}
+
+func (t *Transport) dialTimeout() time.Duration {
+	if t.DialTimeout > 0 {
+		return t.DialTimeout
+	}
+	return 5 * time.Second
+}
+
+func (t *Transport) idleTimeout() time.Duration {
+	if t.IdleTimeout > 0 {
+		return t.IdleTimeout
+	}
+	return 30 * time.Second
+}
+
+func (t *Transport) metadataTTL() time.Duration {
+	if t.MetadataTTL > 0 {
+		return t.MetadataTTL
+	}
+	return 6 * time.Second
+}
+
+func (t *Transport) grabPool(addr net.Addr) *connPool {
+	k := networkAddress{
+		network: addr.Network(),
+		address: addr.String(),
+	}
+
+	t.mutex.RLock()
+	p := t.pools[k]
+	if p != nil {
+		p.ref()
+	}
+	t.mutex.RUnlock()
+
+	if p != nil {
+		return p
+	}
+
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+
+	if p := t.pools[k]; p != nil {
+		p.ref()
+		return p
+	}
+
+	ctx, cancel := context.WithCancel(t.context())
+
+	p = &connPool{
+		refc: 2,
+
+		dial:           t.dial(),
+		dialTimeout:    t.dialTimeout(),
+		idleTimeout:    t.idleTimeout(),
+		metadataTTL:    t.metadataTTL(),
+		metadataTopics: t.MetadataTopics,
+		clientID:       t.ClientID,
+		tls:            t.TLS,
+		sasl:           t.SASL,
+		resolver:       t.Resolver,
+
+		ready:  make(event),
+		wake:   make(chan event),
+		conns:  make(map[int32]*connGroup),
+		cancel: cancel,
+	}
+
+	p.ctrl = p.newConnGroup(addr)
+	go p.discover(ctx, p.wake)
+
+	if t.pools == nil {
+		t.pools = make(map[networkAddress]*connPool)
+	}
+	t.pools[k] = p
+	return p
+}
+
+func (t *Transport) context() context.Context {
+	if t.Context != nil {
+		return t.Context
+	}
+	return context.Background()
+}
+
+type event chan struct{}
+
+func (e event) trigger() { close(e) }
+
+type connPool struct {
+	refc uintptr
+	// Immutable fields of the connection pool. Connections access these fields
+	// on their parent pool in a read-only fashion, so no synchronization is
+	// required.
+	dial           func(context.Context, string, string) (net.Conn, error)
+	dialTimeout    time.Duration
+	idleTimeout    time.Duration
+	metadataTTL    time.Duration
+	metadataTopics []string
+	clientID       string
+	tls            *tls.Config
+	sasl           sasl.Mechanism
+	resolver       BrokerResolver
+	// Signaling mechanisms to orchestrate communications between the pool and
+	// the rest of the program.
+	once   sync.Once  // ensure that `ready` is triggered only once
+	ready  event      // triggered after the first metadata update
+	wake   chan event // used to force metadata updates
+	cancel context.CancelFunc
+	// Mutable fields of the connection pool, access must be synchronized.
+	mutex sync.RWMutex
+	conns map[int32]*connGroup // data connections used for produce/fetch/etc...
+	ctrl  *connGroup   // control connections used for metadata requests
+	state atomic.Value // cached cluster state
+}
+
+type connPoolState struct {
+	metadata *meta.Response   // last metadata response seen by the pool
+	err      error            // last error from metadata requests
+	layout   protocol.Cluster // cluster layout built from metadata response
+}
+
+func (p *connPool) grabState() connPoolState {
+	state, _ := p.state.Load().(connPoolState)
+	return state
+}
+
+func (p *connPool) setState(state connPoolState) {
+	p.state.Store(state)
+}
+
+func (p *connPool) ref() {
+	atomic.AddUintptr(&p.refc, +1)
+}
+
+func (p *connPool) unref() {
+	if atomic.AddUintptr(&p.refc, ^uintptr(0)) == 0 {
+		p.mutex.Lock()
+		defer p.mutex.Unlock()
+
+		for _, conns := range p.conns {
+			conns.closeIdleConns()
+		}
+
+		p.ctrl.closeIdleConns()
+		p.cancel()
+	}
+}
+
+func (p *connPool) roundTrip(ctx context.Context, req Request) (Response, error) {
+	// This first select should never block after the first metadata response
+	// that would mark the pool as `ready`.
+	select {
+	case <-p.ready:
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
+
+	state := p.grabState()
+	var response promise
+
+	switch m := req.(type) {
+	case *meta.Request:
+		// We serve metadata requests directly from the transport cache unless
+		// we would like to auto-create a topic that isn't in our cache.
+		//
+		// This reduces the number of round trips to kafka brokers while keeping
+		// the logic simple when applying partitioning strategies.
+		if state.err != nil {
+			return nil, state.err
+		}
+
+		cachedMeta := filterMetadataResponse(m, state.metadata)
+		// requestNeeded indicates if we need to send this metadata request to the server.
+		// It's true when we want to auto-create topics and we don't have the topic in our
+		// cache.
+		var requestNeeded bool
+		if m.AllowAutoTopicCreation {
+			for _, topic := range cachedMeta.Topics {
+				if topic.ErrorCode == int16(UnknownTopicOrPartition) {
+					requestNeeded = true
+					break
+				}
+			}
+		}
+
+		if !requestNeeded {
+			return cachedMeta, nil
+		}
+
+	case protocol.Splitter:
+		// Messages that implement the Splitter interface trigger the creation of
+		// multiple requests that are all merged back into a single result by
+		// a merger.
+		messages, merger, err := m.Split(state.layout)
+		if err != nil {
+			return nil, err
+		}
+		promises := make([]promise, len(messages))
+		for i, m := range messages {
+			promises[i] = p.sendRequest(ctx, m, state)
+		}
+		response = join(promises, messages, merger)
+	}
+
+	if response == nil {
+		response = p.sendRequest(ctx, req, state)
+	}
+
+	r, err := response.await(ctx)
+	if err != nil {
+		return r, err
+	}
+
+	switch resp := r.(type) {
+	case *createtopics.Response:
+		// Force an update of the metadata when adding topics,
+		// otherwise the cached state would get out of sync.
+		topicsToRefresh := make([]string, 0, len(resp.Topics))
+		for _, topic := range resp.Topics {
+			// Fixes issue 672: don't refresh topics that failed to create,
+			// since doing so causes the library to hang indefinitely.
+			if topic.ErrorCode != 0 {
+				continue
+			}
+
+			topicsToRefresh = append(topicsToRefresh, topic.Name)
+		}
+
+		p.refreshMetadata(ctx, topicsToRefresh)
+	case *meta.Response:
+		m := req.(*meta.Request)
+		// If we get here with allow auto topic creation then
+		// we didn't have that topic in our cache, so we should update
+		// the cache.
+		if m.AllowAutoTopicCreation {
+			topicsToRefresh := make([]string, 0, len(resp.Topics))
+			for _, topic := range resp.Topics {
+				// Don't refresh topics that failed to create; the failure may
+				// mean that automatic topic creation is not enabled. Refreshing
+				// them would cause the library to hang indefinitely, just like
+				// in the createtopics case above. Fixes issue 806.
+				if topic.ErrorCode != 0 {
+					continue
+				}
+
+				topicsToRefresh = append(topicsToRefresh, topic.Name)
+			}
+			p.refreshMetadata(ctx, topicsToRefresh)
+		}
+	}
+
+	return r, nil
+}
+
+// refreshMetadata forces an update of the cached cluster metadata, and waits
+// for the given list of topics to appear. This waiting mechanism is necessary
+// to account for the fact that topic creation is asynchronous in kafka, and
+// causes subsequent requests to fail while the cluster state is propagated to
+// all the brokers.
+func (p *connPool) refreshMetadata(ctx context.Context, expectTopics []string) {
+	minBackoff := 100 * time.Millisecond
+	maxBackoff := 2 * time.Second
+	cancel := ctx.Done()
+
+	for ctx.Err() == nil {
+		notify := make(event)
+		select {
+		case <-cancel:
+			return
+		case p.wake <- notify:
+			select {
+			case <-notify:
+			case <-cancel:
+				return
+			}
+		}
+
+		state := p.grabState()
+		found := 0
+
+		for _, topic := range expectTopics {
+			if _, ok := state.layout.Topics[topic]; ok {
+				found++
+			}
+		}
+
+		if found == len(expectTopics) {
+			return
+		}
+
+		if delay := time.Duration(rand.Int63n(int64(minBackoff))); delay > 0 {
+			timer := time.NewTimer(minBackoff)
+			select {
+			case <-cancel:
+			case <-timer.C:
+			}
+			timer.Stop()
+
+			if minBackoff *= 2; minBackoff > maxBackoff {
+				minBackoff = maxBackoff
+			}
+		}
+	}
+}
+
+func (p *connPool) setReady() {
+	p.once.Do(p.ready.trigger)
+}
+
+// update is called periodically by the goroutine running the discover method
+// to refresh the cluster layout information used by the transport to route
+// requests to brokers.
+func (p *connPool) update(ctx context.Context, metadata *meta.Response, err error) {
+	var layout protocol.Cluster
+
+	if metadata != nil {
+		metadata.ThrottleTimeMs = 0
+
+		// Normalize the lists so we can apply binary search on them.
+		sortMetadataBrokers(metadata.Brokers)
+		sortMetadataTopics(metadata.Topics)
+
+		for i := range metadata.Topics {
+			t := &metadata.Topics[i]
+			sortMetadataPartitions(t.Partitions)
+		}
+
+		layout = makeLayout(metadata)
+	}
+
+	state := p.grabState()
+	addBrokers := make(map[int32]struct{})
+	delBrokers := make(map[int32]struct{})
+
+	if err != nil {
+		// Only update the error on the transport if the cluster layout was
+		// unknown. This ensures that we prioritize a previously known state
+		// of the cluster to reduce the impact of transient failures.
+		if state.metadata != nil {
+			return
+		}
+		state.err = err
+	} else {
+		for id, b2 := range layout.Brokers {
+			if b1, ok := state.layout.Brokers[id]; !ok {
+				addBrokers[id] = struct{}{}
+			} else if b1 != b2 {
+				addBrokers[id] = struct{}{}
+				delBrokers[id] = struct{}{}
+			}
+		}
+
+		for id := range state.layout.Brokers {
+			if _, ok := layout.Brokers[id]; !ok {
+				delBrokers[id] = struct{}{}
+			}
+		}
+
+		state.metadata, state.layout = metadata, layout
+		state.err = nil
+	}
+
+	defer p.setReady()
+	defer p.setState(state)
+
+	if len(addBrokers) != 0 || len(delBrokers) != 0 {
+		// Only acquire the lock when there is a change of layout. This is an
+		// infrequent event so we don't risk introducing regular contention on
+		// the mutex if we were to lock it on every update.
+		p.mutex.Lock()
+		defer p.mutex.Unlock()
+
+		if ctx.Err() != nil {
+			return // the pool has been closed, no need to update
+		}
+
+		for id := range delBrokers {
+			if broker := p.conns[id]; broker != nil {
+				broker.closeIdleConns()
+				delete(p.conns, id)
+			}
+		}
+
+		for id := range addBrokers {
+			broker := layout.Brokers[id]
+			p.conns[id] = p.newBrokerConnGroup(Broker{
+				Rack: broker.Rack,
+				Host: broker.Host,
+				Port: int(broker.Port),
+				ID:   int(broker.ID),
+			})
+		}
+	}
+}
+
+// discover is the entry point of an internal goroutine for the transport which
+// periodically requests updates of the cluster metadata and refreshes the
+// transport cached cluster layout.
+func (p *connPool) discover(ctx context.Context, wake <-chan event) {
+	prng := rand.New(rand.NewSource(time.Now().UnixNano()))
+	metadataTTL := func() time.Duration {
+		return time.Duration(prng.Int63n(int64(p.metadataTTL)))
+	}
+
+	timer := time.NewTimer(metadataTTL())
+	defer timer.Stop()
+
+	var notify event
+	done := ctx.Done()
+
+	req := &meta.Request{
+		TopicNames: p.metadataTopics,
+	}
+
+	for {
+		c, err := p.grabClusterConn(ctx)
+		if err != nil {
+			p.update(ctx, nil, err)
+		} else {
+			res := make(async, 1)
+			deadline, cancel := context.WithTimeout(ctx, p.metadataTTL)
+			c.reqs <- connRequest{
+				ctx: deadline,
+				req: req,
+				res: res,
+			}
+			r, err := res.await(deadline)
+			cancel()
+			if err != nil && errors.Is(err, ctx.Err()) {
+				return
+			}
+			ret, _ := r.(*meta.Response)
+			p.update(ctx, ret, err)
+		}
+
+		if notify != nil {
+			notify.trigger()
+			notify = nil
+		}
+
+		select {
+		case <-timer.C:
+			timer.Reset(metadataTTL())
+		case <-done:
+			return
+		case notify = <-wake:
+		}
+	}
+}
+
+// grabBrokerConn returns a connection to a specific broker represented by the
+// broker id passed as argument. If the broker id was not known, an error is
+// returned.
+func (p *connPool) grabBrokerConn(ctx context.Context, brokerID int32) (*conn, error) {
+	p.mutex.RLock()
+	g := p.conns[brokerID]
+	p.mutex.RUnlock()
+	if g == nil {
+		return nil, BrokerNotAvailable
+	}
+	return g.grabConnOrConnect(ctx)
+}
+
+// grabClusterConn returns the connection to the kafka cluster that the pool is
+// configured to connect to.
+//
+// The transport uses a shared `control` connection to the cluster for any
+// requests that aren't supposed to be sent to specific brokers (e.g. Fetch or
+// Produce requests). Requests intended to be routed to specific brokers are
+// dispatched on a separate pool of connections that the transport maintains.
+// This split helps avoid head-of-line blocking situations where control
+// requests like Metadata would be queued behind large responses from Fetch
+// requests for example.
+//
+// In either case, the requests are multiplexed so we can keep a minimal number
+// of connections open (N+1, where N is the number of brokers in the cluster).
+func (p *connPool) grabClusterConn(ctx context.Context) (*conn, error) {
+	return p.ctrl.grabConnOrConnect(ctx)
+}
+
+func (p *connPool) sendRequest(ctx context.Context, req Request, state connPoolState) promise {
+	brokerID := int32(-1)
+
+	switch m := req.(type) {
+	case protocol.BrokerMessage:
+		// Some requests are supposed to be sent to specific brokers (e.g. the
+		// partition leaders). They implement the BrokerMessage interface to
+		// delegate the routing decision to each message type.
+		broker, err := m.Broker(state.layout)
+		if err != nil {
+			return reject(err)
+		}
+		brokerID = broker.ID
+
+	case protocol.GroupMessage:
+		// Some requests are supposed to be sent to a group coordinator;
+		// look up which broker is currently the coordinator for the group
+		// so we can get a connection to that broker.
+		//
+		// TODO: should we cache the coordinator info?
+		p := p.sendRequest(ctx, &findcoordinator.Request{Key: m.Group()}, state)
+		r, err := p.await(ctx)
+		if err != nil {
+			return reject(err)
+		}
+		brokerID = r.(*findcoordinator.Response).NodeID
+	case protocol.TransactionalMessage:
+		p := p.sendRequest(ctx, &findcoordinator.Request{
+			Key:     m.Transaction(),
+			KeyType: int8(CoordinatorKeyTypeTransaction),
+		}, state)
+		r, err := p.await(ctx)
+		if err != nil {
+			return reject(err)
+		}
+		brokerID = r.(*findcoordinator.Response).NodeID
+	}
+
+	var c *conn
+	var err error
+	if brokerID >= 0 {
+		c, err = p.grabBrokerConn(ctx, brokerID)
+	} else {
+		c, err = p.grabClusterConn(ctx)
+	}
+	if err != nil {
+		return reject(err)
+	}
+
+	res := make(async, 1)
+
+	c.reqs <- connRequest{
+		ctx: ctx,
+		req: req,
+		res: res,
+	}
+
+	return res
+}
+
+func filterMetadataResponse(req *meta.Request, res *meta.Response) *meta.Response {
+	ret := *res
+
+	if req.TopicNames != nil {
+		ret.Topics = make([]meta.ResponseTopic, len(req.TopicNames))
+
+		for i, topicName := range req.TopicNames {
+			j, ok := findMetadataTopic(res.Topics, topicName)
+			if ok {
+				ret.Topics[i] = res.Topics[j]
+			} else {
+				ret.Topics[i] = meta.ResponseTopic{
+					ErrorCode: int16(UnknownTopicOrPartition),
+					Name:      topicName,
+				}
+			}
+		}
+	}
+
+	return &ret
+}
+
+func findMetadataTopic(topics []meta.ResponseTopic, topicName string) (int, bool) {
+	i := sort.Search(len(topics), func(i int) bool {
+		return topics[i].Name >= topicName
+	})
+	return i, i >= 0 && i < len(topics) && topics[i].Name == topicName
+}
+
+func sortMetadataBrokers(brokers []meta.ResponseBroker) {
+	sort.Slice(brokers, func(i, j int) bool {
+		return brokers[i].NodeID < brokers[j].NodeID
+	})
+}
+
+func sortMetadataTopics(topics []meta.ResponseTopic) {
+	sort.Slice(topics, func(i, j int) bool {
+		return topics[i].Name < topics[j].Name
+	})
+}
+
+func sortMetadataPartitions(partitions []meta.ResponsePartition) {
+	sort.Slice(partitions, func(i, j int) bool {
+		return partitions[i].PartitionIndex < partitions[j].PartitionIndex
+	})
+}
+
+func makeLayout(metadataResponse *meta.Response) protocol.Cluster {
+	layout := protocol.Cluster{
+		Controller: metadataResponse.ControllerID,
+		Brokers:    make(map[int32]protocol.Broker),
+		Topics:     make(map[string]protocol.Topic),
+	}
+
+	for _, broker := range metadataResponse.Brokers {
+		layout.Brokers[broker.NodeID] = protocol.Broker{
+			Rack: broker.Rack,
+			Host: broker.Host,
+			Port: broker.Port,
+			ID:   broker.NodeID,
+		}
+	}
+
+	for _, topic := range metadataResponse.Topics {
+		if topic.IsInternal {
+			continue // TODO: do we need to expose those?
+		}
+		layout.Topics[topic.Name] = protocol.Topic{
+			Name:       topic.Name,
+			Error:      topic.ErrorCode,
+			Partitions: makePartitions(topic.Partitions),
+		}
+	}
+
+	return layout
+}
+
+func makePartitions(metadataPartitions []meta.ResponsePartition) map[int32]protocol.Partition {
+	protocolPartitions := make(map[int32]protocol.Partition, len(metadataPartitions))
+	numBrokerIDs := 0
+
+	for _, p := range metadataPartitions {
+		numBrokerIDs += len(p.ReplicaNodes) + len(p.IsrNodes) + len(p.OfflineReplicas)
+	}
+
+	// Reduce the memory footprint a bit by allocating a single buffer to write
+	// all broker ids.
+	brokerIDs := make([]int32, 0, numBrokerIDs)
+
+	for _, p := range metadataPartitions {
+		var rep, isr, off []int32
+		brokerIDs, rep = appendBrokerIDs(brokerIDs, p.ReplicaNodes)
+		brokerIDs, isr = appendBrokerIDs(brokerIDs, p.IsrNodes)
+		brokerIDs, off = appendBrokerIDs(brokerIDs, p.OfflineReplicas)
+
+		protocolPartitions[p.PartitionIndex] = protocol.Partition{
+			ID:       p.PartitionIndex,
+			Error:    p.ErrorCode,
+			Leader:   p.LeaderID,
+			Replicas: rep,
+			ISR:      isr,
+			Offline:  off,
+		}
+	}
+
+	return protocolPartitions
+}
+
+func appendBrokerIDs(ids, brokers []int32) ([]int32, []int32) {
+	i := len(ids)
+	ids = append(ids, brokers...)
+	return ids, ids[i:len(ids):len(ids)]
+}
+
+func (p *connPool) newConnGroup(a net.Addr) *connGroup {
+	return &connGroup{
+		addr: a,
+		pool: p,
+		broker: Broker{
+			ID: -1,
+		},
+	}
+}
+
+func (p *connPool) newBrokerConnGroup(broker Broker) *connGroup {
+	return &connGroup{
+		addr: &networkAddress{
+			network: "tcp",
+			address: net.JoinHostPort(broker.Host, strconv.Itoa(broker.Port)),
+		},
+		pool:   p,
+		broker: broker,
+	}
+}
+
+type connRequest struct {
+	ctx context.Context
+	req Request
+	res async
+}
+
+// The promise interface is used as a message passing abstraction to coordinate
+// between goroutines that handle requests and responses.
+type promise interface {
+	// Waits until the promise is resolved, rejected, or the context canceled.
+	await(context.Context) (Response, error)
+}
+
+// async is an implementation of the promise interface which supports resolving
+// or rejecting the await call asynchronously.
+type async chan interface{}
+
+func (p async) await(ctx context.Context) (Response, error) {
+	select {
+	case x := <-p:
+		switch v := x.(type) {
+		case nil:
+			return nil, nil // A nil response is ok (e.g. when RequiredAcks is None)
+		case Response:
+			return v, nil
+		case error:
+			return nil, v
+		default:
+			panic(fmt.Errorf("BUG: promise resolved with impossible value of type %T", v))
+		}
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
+}
+
+func (p async) resolve(res Response) { p <- res }
+
+func (p async) reject(err error) { p <- err }
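+
+// A minimal sketch of how the two halves cooperate (illustrative):
+//
+//	p := make(async, 1)
+//	go func() { p.resolve(res) }() // or p.reject(err) on failure
+//	r, err := p.await(ctx)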
+
+// rejected is an implementation of the promise interface which always
+// returns an error. Values of this type are constructed using the reject
+// function.
+type rejected struct{ err error }
+
+func reject(err error) promise { return &rejected{err: err} }
+
+func (p *rejected) await(ctx context.Context) (Response, error) {
+	return nil, p.err
+}
+
+// joined is an implementation of the promise interface which merges results
+// from multiple promises into one await call using a merger.
+type joined struct {
+	promises []promise
+	requests []Request
+	merger   protocol.Merger
+}
+
+func join(promises []promise, requests []Request, merger protocol.Merger) promise {
+	return &joined{
+		promises: promises,
+		requests: requests,
+		merger:   merger,
+	}
+}
+
+func (p *joined) await(ctx context.Context) (Response, error) {
+	results := make([]interface{}, len(p.promises))
+
+	for i, sub := range p.promises {
+		m, err := sub.await(ctx)
+		if err != nil {
+			results[i] = err
+		} else {
+			results[i] = m
+		}
+	}
+
+	return p.merger.Merge(p.requests, results)
+}
+
+// Default dialer used by the transport connections when no Dial function
+// was configured by the program.
+var defaultDialer = net.Dialer{
+	Timeout:   3 * time.Second,
+	DualStack: true,
+}
+
+// connGroup represents a logical connection group to a kafka broker. The
+// actual network connections are lazily opened before sending requests, and
+// closed if they are unused for longer than the idle timeout.
+type connGroup struct {
+	addr   net.Addr
+	broker Broker
+	// Immutable state of the connection.
+	pool *connPool
+	// Shared state of the connection; this is synchronized on the mutex through
+	// calls to the synchronized method. Both goroutines of the connection share
+	// the state maintained in these fields.
+	mutex     sync.Mutex
+	closed    bool
+	idleConns []*conn // stack of idle connections
+}
+
+func (g *connGroup) closeIdleConns() {
+	g.mutex.Lock()
+	conns := g.idleConns
+	g.idleConns = nil
+	g.closed = true
+	g.mutex.Unlock()
+
+	for _, c := range conns {
+		c.close()
+	}
+}
+
+func (g *connGroup) grabConnOrConnect(ctx context.Context) (*conn, error) {
+	rslv := g.pool.resolver
+	addr := g.addr
+	var c *conn
+
+	if rslv == nil {
+		c = g.grabConn()
+	} else {
+		var err error
+		broker := g.broker
+
+		if broker.ID < 0 {
+			host, port, err := splitHostPortNumber(addr.String())
+			if err != nil {
+				return nil, err
+			}
+			broker.Host = host
+			broker.Port = port
+		}
+
+		ipAddrs, err := rslv.LookupBrokerIPAddr(ctx, broker)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, ipAddr := range ipAddrs {
+			network := addr.Network()
+			address := net.JoinHostPort(ipAddr.String(), strconv.Itoa(broker.Port))
+
+			if c = g.grabConnTo(network, address); c != nil {
+				break
+			}
+		}
+	}
+
+	if c == nil {
+		connChan := make(chan *conn)
+		errChan := make(chan error)
+
+		go func() {
+			c, err := g.connect(ctx, addr)
+			if err != nil {
+				select {
+				case errChan <- err:
+				case <-ctx.Done():
+				}
+			} else {
+				select {
+				case connChan <- c:
+				case <-ctx.Done():
+					if !g.releaseConn(c) {
+						c.close()
+					}
+				}
+			}
+		}()
+
+		select {
+		case c = <-connChan:
+		case err := <-errChan:
+			return nil, err
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
+	}
+
+	return c, nil
+}
+
+func (g *connGroup) grabConnTo(network, address string) *conn {
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+
+	for i := len(g.idleConns) - 1; i >= 0; i-- {
+		c := g.idleConns[i]
+
+		if c.network == network && c.address == address {
+			copy(g.idleConns[i:], g.idleConns[i+1:])
+			n := len(g.idleConns) - 1
+			g.idleConns[n] = nil
+			g.idleConns = g.idleConns[:n]
+
+			if c.timer != nil {
+				c.timer.Stop()
+			}
+
+			return c
+		}
+	}
+
+	return nil
+}
+
+func (g *connGroup) grabConn() *conn {
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+
+	if len(g.idleConns) == 0 {
+		return nil
+	}
+
+	n := len(g.idleConns) - 1
+	c := g.idleConns[n]
+	g.idleConns[n] = nil
+	g.idleConns = g.idleConns[:n]
+
+	if c.timer != nil {
+		c.timer.Stop()
+	}
+
+	return c
+}
+
+func (g *connGroup) removeConn(c *conn) bool {
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+
+	if c.timer != nil {
+		c.timer.Stop()
+	}
+
+	for i, x := range g.idleConns {
+		if x == c {
+			copy(g.idleConns[i:], g.idleConns[i+1:])
+			n := len(g.idleConns) - 1
+			g.idleConns[n] = nil
+			g.idleConns = g.idleConns[:n]
+			return true
+		}
+	}
+
+	return false
+}
+
+func (g *connGroup) releaseConn(c *conn) bool {
+	idleTimeout := g.pool.idleTimeout
+
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+
+	if g.closed {
+		return false
+	}
+
+	if c.timer != nil {
+		c.timer.Reset(idleTimeout)
+	} else {
+		c.timer = time.AfterFunc(idleTimeout, func() {
+			if g.removeConn(c) {
+				c.close()
+			}
+		})
+	}
+
+	g.idleConns = append(g.idleConns, c)
+	return true
+}
+
+func (g *connGroup) connect(ctx context.Context, addr net.Addr) (*conn, error) {
+	deadline := time.Now().Add(g.pool.dialTimeout)
+
+	ctx, cancel := context.WithDeadline(ctx, deadline)
+	defer cancel()
+
+	network := strings.Split(addr.Network(), ",")
+	address := strings.Split(addr.String(), ",")
+	var netConn net.Conn
+	var netAddr net.Addr
+	var err error
+
+	if len(address) > 1 {
+		// Shuffle the list of addresses to randomize the order in which
+		// connections are attempted. This prevents routing all connections
+		// to the first broker (which will usually succeed).
+		rand.Shuffle(len(address), func(i, j int) {
+			network[i], network[j] = network[j], network[i]
+			address[i], address[j] = address[j], address[i]
+		})
+	}
+
+	for i := range address {
+		netConn, err = g.pool.dial(ctx, network[i], address[i])
+		if err == nil {
+			netAddr = &networkAddress{
+				network: network[i],
+				address: address[i],
+			}
+			break
+		}
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if netConn != nil {
+			netConn.Close()
+		}
+	}()
+
+	if tlsConfig := g.pool.tls; tlsConfig != nil {
+		if tlsConfig.ServerName == "" {
+			host, _ := splitHostPort(netAddr.String())
+			tlsConfig = tlsConfig.Clone()
+			tlsConfig.ServerName = host
+		}
+		netConn = tls.Client(netConn, tlsConfig)
+	}
+
+	pc := protocol.NewConn(netConn, g.pool.clientID)
+	pc.SetDeadline(deadline)
+
+	r, err := pc.RoundTrip(new(apiversions.Request))
+	if err != nil {
+		return nil, err
+	}
+	res := r.(*apiversions.Response)
+	ver := make(map[protocol.ApiKey]int16, len(res.ApiKeys))
+
+	if res.ErrorCode != 0 {
+		return nil, fmt.Errorf("negotiating API versions with kafka broker at %s: %w", g.addr, Error(res.ErrorCode))
+	}
+
+	for _, r := range res.ApiKeys {
+		apiKey := protocol.ApiKey(r.ApiKey)
+		ver[apiKey] = apiKey.SelectVersion(r.MinVersion, r.MaxVersion)
+	}
+
+	pc.SetVersions(ver)
+	pc.SetDeadline(time.Time{})
+
+	if g.pool.sasl != nil {
+		host, port, err := splitHostPortNumber(netAddr.String())
+		if err != nil {
+			return nil, err
+		}
+		metadata := &sasl.Metadata{
+			Host: host,
+			Port: port,
+		}
+		if err := authenticateSASL(sasl.WithMetadata(ctx, metadata), pc, g.pool.sasl); err != nil {
+			return nil, err
+		}
+	}
+
+	reqs := make(chan connRequest)
+	c := &conn{
+		network: netAddr.Network(),
+		address: netAddr.String(),
+		reqs:    reqs,
+		group:   g,
+	}
+	go c.run(pc, reqs)
+
+	netConn = nil
+	return c, nil
+}
+
+type conn struct {
+	reqs    chan<- connRequest
+	network string
+	address string
+	once    sync.Once
+	group   *connGroup
+	timer   *time.Timer
+}
+
+func (c *conn) close() {
+	c.once.Do(func() { close(c.reqs) })
+}
+
+func (c *conn) run(pc *protocol.Conn, reqs <-chan connRequest) {
+	defer pc.Close()
+
+	for cr := range reqs {
+		r, err := c.roundTrip(cr.ctx, pc, cr.req)
+		if err != nil {
+			cr.res.reject(err)
+			if !errors.Is(err, protocol.ErrNoRecord) {
+				break
+			}
+		} else {
+			cr.res.resolve(r)
+		}
+		if !c.group.releaseConn(c) {
+			break
+		}
+	}
+}
+
+func (c *conn) roundTrip(ctx context.Context, pc *protocol.Conn, req Request) (Response, error) {
+	pprof.SetGoroutineLabels(ctx)
+	defer pprof.SetGoroutineLabels(context.Background())
+
+	if deadline, hasDeadline := ctx.Deadline(); hasDeadline {
+		pc.SetDeadline(deadline)
+		defer pc.SetDeadline(time.Time{})
+	}
+
+	return pc.RoundTrip(req)
+}
+
+// authenticateSASL performs all of the required requests to authenticate this
+// connection. If any step fails, this function returns with an error. A nil
+// error indicates successful authentication.
+func authenticateSASL(ctx context.Context, pc *protocol.Conn, mechanism sasl.Mechanism) error {
+	if err := saslHandshakeRoundTrip(pc, mechanism.Name()); err != nil {
+		return err
+	}
+
+	sess, state, err := mechanism.Start(ctx)
+	if err != nil {
+		return err
+	}
+
+	for completed := false; !completed; {
+		challenge, err := saslAuthenticateRoundTrip(pc, state)
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				// the broker may communicate a failed exchange by closing the
+				// connection (esp. in the case where we're passing opaque sasl
+				// data over the wire since there's no protocol info).
+				return SASLAuthenticationFailed
+			}
+
+			return err
+		}
+
+		completed, state, err = sess.Next(ctx, challenge)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// saslHandshake sends the SASL handshake message. This will determine whether
+// the Mechanism is supported by the cluster. If it's not, this function will
+// error out with UnsupportedSASLMechanism.
+//
+// If the mechanism is unsupported, the handshake request will reply with the
+// list of the cluster's configured mechanisms, which could potentially be used
+// to facilitate negotiation. At the moment, we are not negotiating the
+// mechanism as we believe that brokers are usually known to the client, and
+// therefore the client should already know which mechanisms are supported.
+//
+// See http://kafka.apache.org/protocol.html#The_Messages_SaslHandshake
+func saslHandshakeRoundTrip(pc *protocol.Conn, mechanism string) error {
+	msg, err := pc.RoundTrip(&saslhandshake.Request{
+		Mechanism: mechanism,
+	})
+	if err != nil {
+		return err
+	}
+	res := msg.(*saslhandshake.Response)
+	if res.ErrorCode != 0 {
+		err = Error(res.ErrorCode)
+	}
+	return err
+}
+
+// saslAuthenticate sends the SASL authenticate message. This function must
+// be immediately preceded by a successful saslHandshake.
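+//
+// As authenticateSASL above shows, the exchange may loop: each round trip
+// passes the mechanism's current state and feeds the returned challenge back
+// into the session's Next method until the session reports completion.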
+//
+// See http://kafka.apache.org/protocol.html#The_Messages_SaslAuthenticate
+func saslAuthenticateRoundTrip(pc *protocol.Conn, data []byte) ([]byte, error) {
+	msg, err := pc.RoundTrip(&saslauthenticate.Request{
+		AuthBytes: data,
+	})
+	if err != nil {
+		return nil, err
+	}
+	res := msg.(*saslauthenticate.Response)
+	if res.ErrorCode != 0 {
+		err = makeError(res.ErrorCode, res.ErrorMessage)
+	}
+	return res.AuthBytes, err
+}
+
+var _ RoundTripper = (*Transport)(nil)
diff --git a/vendor/github.com/segmentio/kafka-go/txnoffsetcommit.go b/vendor/github.com/segmentio/kafka-go/txnoffsetcommit.go
new file mode 100644
index 00000000000..9480fc3a79e
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/txnoffsetcommit.go
@@ -0,0 +1,142 @@
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/segmentio/kafka-go/protocol/txnoffsetcommit"
+)
+
+// TxnOffsetCommitRequest represents a request sent to a kafka broker to commit
+// offsets for a partition within a transaction.
+type TxnOffsetCommitRequest struct {
+	// Address of the kafka broker to send the request to.
+	Addr net.Addr
+
+	// The transactional id key.
+	TransactionalID string
+
+	// ID of the consumer group to publish the offsets for.
+	GroupID string
+
+	// The Producer ID (PID) for the current producer session;
+	// received from an InitProducerID request.
+	ProducerID int
+
+	// The epoch associated with the current producer session for the given PID
+	ProducerEpoch int
+
+	// GenerationID is the current generation for the group.
+	GenerationID int
+
+	// ID of the group member submitting the offsets.
+	MemberID string
+
+	// GroupInstanceID is a unique identifier for the consumer.
+	GroupInstanceID string
+
+	// Set of topic partitions to publish the offsets for.
+	//
+	// Note that offset commits need to be submitted to the broker acting as the
+	// group coordinator. This will be automatically resolved by the transport.
+	Topics map[string][]TxnOffsetCommit
+}
+
+// TxnOffsetCommit represents the commit of an offset to a partition within a transaction.
+//
+// The extra metadata is opaque to the kafka protocol, it is intended to hold
+// information like an identifier for the process that committed the offset,
+// or the time at which the commit was made.
+type TxnOffsetCommit struct {
+	Partition int
+	Offset    int64
+	Metadata  string
+}
+
+// TxnOffsetCommitResponse represents a response from a kafka broker to an offset
+// commit request within a transaction.
+type TxnOffsetCommitResponse struct {
+	// The amount of time that the broker throttled the request.
+	Throttle time.Duration
+
+	// Set of topic partitions that the kafka broker has accepted offset commits
+	// for.
+	Topics map[string][]TxnOffsetCommitPartition
+}
+
+// TxnOffsetCommitPartition represents the state of a single partition in responses
+// to committing offsets within a transaction.
+type TxnOffsetCommitPartition struct {
+	// ID of the partition.
+	Partition int
+
+	// An error that may have occurred while attempting to publish consumer
+	// group offsets for this partition.
+	//
+	// The error contains both the kafka error code, and an error message
+	// returned by the kafka broker. Programs may use the standard errors.Is
+	// function to test the error against kafka error codes.
+	Error error
+}
+
+// TxnOffsetCommit sends a txn offset commit request to a kafka broker and returns the
+// response.
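+//
+// A minimal usage sketch (illustrative only; the transactional id, group id,
+// producer session values, and offsets are assumed to have been obtained
+// elsewhere):
+//
+//	res, err := client.TxnOffsetCommit(ctx, &kafka.TxnOffsetCommitRequest{
+//		TransactionalID: "my-transaction",
+//		GroupID:         "my-group",
+//		ProducerID:      pid,
+//		ProducerEpoch:   epoch,
+//		GenerationID:    generation,
+//		MemberID:        member,
+//		Topics: map[string][]kafka.TxnOffsetCommit{
+//			"topic-A": {{Partition: 0, Offset: 42}},
+//		},
+//	})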
+func (c *Client) TxnOffsetCommit( + ctx context.Context, + req *TxnOffsetCommitRequest, +) (*TxnOffsetCommitResponse, error) { + protoReq := &txnoffsetcommit.Request{ + TransactionalID: req.TransactionalID, + GroupID: req.GroupID, + ProducerID: int64(req.ProducerID), + ProducerEpoch: int16(req.ProducerEpoch), + GenerationID: int32(req.GenerationID), + MemberID: req.MemberID, + GroupInstanceID: req.GroupInstanceID, + Topics: make([]txnoffsetcommit.RequestTopic, 0, len(req.Topics)), + } + + for topic, partitions := range req.Topics { + parts := make([]txnoffsetcommit.RequestPartition, len(partitions)) + for i, partition := range partitions { + parts[i] = txnoffsetcommit.RequestPartition{ + Partition: int32(partition.Partition), + CommittedOffset: int64(partition.Offset), + CommittedMetadata: partition.Metadata, + } + } + t := txnoffsetcommit.RequestTopic{ + Name: topic, + Partitions: parts, + } + + protoReq.Topics = append(protoReq.Topics, t) + } + + m, err := c.roundTrip(ctx, req.Addr, protoReq) + if err != nil { + return nil, fmt.Errorf("kafka.(*Client).TxnOffsetCommit: %w", err) + } + + r := m.(*txnoffsetcommit.Response) + + res := &TxnOffsetCommitResponse{ + Throttle: makeDuration(r.ThrottleTimeMs), + Topics: make(map[string][]TxnOffsetCommitPartition, len(r.Topics)), + } + + for _, topic := range r.Topics { + partitions := make([]TxnOffsetCommitPartition, 0, len(topic.Partitions)) + for _, partition := range topic.Partitions { + partitions = append(partitions, TxnOffsetCommitPartition{ + Partition: int(partition.Partition), + Error: makeError(partition.ErrorCode, ""), + }) + } + res.Topics[topic.Name] = partitions + } + + return res, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/write.go b/vendor/github.com/segmentio/kafka-go/write.go new file mode 100644 index 00000000000..3b806509c92 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/write.go @@ -0,0 +1,614 @@ +package kafka + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + "io" + "time" +) + +type writeBuffer struct { + w io.Writer + b [16]byte +} + +func (wb *writeBuffer) writeInt8(i int8) { + wb.b[0] = byte(i) + wb.Write(wb.b[:1]) +} + +func (wb *writeBuffer) writeInt16(i int16) { + binary.BigEndian.PutUint16(wb.b[:2], uint16(i)) + wb.Write(wb.b[:2]) +} + +func (wb *writeBuffer) writeInt32(i int32) { + binary.BigEndian.PutUint32(wb.b[:4], uint32(i)) + wb.Write(wb.b[:4]) +} + +func (wb *writeBuffer) writeInt64(i int64) { + binary.BigEndian.PutUint64(wb.b[:8], uint64(i)) + wb.Write(wb.b[:8]) +} + +func (wb *writeBuffer) writeVarInt(i int64) { + u := uint64((i << 1) ^ (i >> 63)) + n := 0 + + for u >= 0x80 && n < len(wb.b) { + wb.b[n] = byte(u) | 0x80 + u >>= 7 + n++ + } + + if n < len(wb.b) { + wb.b[n] = byte(u) + n++ + } + + wb.Write(wb.b[:n]) +} + +func (wb *writeBuffer) writeString(s string) { + wb.writeInt16(int16(len(s))) + wb.WriteString(s) +} + +func (wb *writeBuffer) writeVarString(s string) { + wb.writeVarInt(int64(len(s))) + wb.WriteString(s) +} + +func (wb *writeBuffer) writeNullableString(s *string) { + if s == nil { + wb.writeInt16(-1) + } else { + wb.writeString(*s) + } +} + +func (wb *writeBuffer) writeBytes(b []byte) { + n := len(b) + if b == nil { + n = -1 + } + wb.writeInt32(int32(n)) + wb.Write(b) +} + +func (wb *writeBuffer) writeVarBytes(b []byte) { + if b != nil { + wb.writeVarInt(int64(len(b))) + wb.Write(b) + } else { + //-1 is used to indicate nil key + wb.writeVarInt(-1) + } +} + +func (wb *writeBuffer) writeBool(b bool) { + v := int8(0) + if b { + 
v = 1
+	}
+	wb.writeInt8(v)
+}
+
+func (wb *writeBuffer) writeArrayLen(n int) {
+	wb.writeInt32(int32(n))
+}
+
+func (wb *writeBuffer) writeArray(n int, f func(int)) {
+	wb.writeArrayLen(n)
+	for i := 0; i < n; i++ {
+		f(i)
+	}
+}
+
+func (wb *writeBuffer) writeVarArray(n int, f func(int)) {
+	wb.writeVarInt(int64(n))
+	for i := 0; i < n; i++ {
+		f(i)
+	}
+}
+
+func (wb *writeBuffer) writeStringArray(a []string) {
+	wb.writeArray(len(a), func(i int) { wb.writeString(a[i]) })
+}
+
+func (wb *writeBuffer) writeInt32Array(a []int32) {
+	wb.writeArray(len(a), func(i int) { wb.writeInt32(a[i]) })
+}
+
+func (wb *writeBuffer) write(a interface{}) {
+	switch v := a.(type) {
+	case int8:
+		wb.writeInt8(v)
+	case int16:
+		wb.writeInt16(v)
+	case int32:
+		wb.writeInt32(v)
+	case int64:
+		wb.writeInt64(v)
+	case string:
+		wb.writeString(v)
+	case []byte:
+		wb.writeBytes(v)
+	case bool:
+		wb.writeBool(v)
+	case writable:
+		v.writeTo(wb)
+	default:
+		panic(fmt.Sprintf("unsupported type: %T", a))
+	}
+}
+
+func (wb *writeBuffer) Write(b []byte) (int, error) {
+	return wb.w.Write(b)
+}
+
+func (wb *writeBuffer) WriteString(s string) (int, error) {
+	return io.WriteString(wb.w, s)
+}
+
+func (wb *writeBuffer) Flush() error {
+	if x, ok := wb.w.(interface{ Flush() error }); ok {
+		return x.Flush()
+	}
+	return nil
+}
+
+type writable interface {
+	writeTo(*writeBuffer)
+}
+
+func (wb *writeBuffer) writeFetchRequestV2(correlationID int32, clientID, topic string, partition int32, offset int64, minBytes, maxBytes int, maxWait time.Duration) error {
+	h := requestHeader{
+		ApiKey:        int16(fetch),
+		ApiVersion:    int16(v2),
+		CorrelationID: correlationID,
+		ClientID:      clientID,
+	}
+	h.Size = (h.size() - 4) +
+		4 + // replica ID
+		4 + // max wait time
+		4 + // min bytes
+		4 + // topic array length
+		sizeofString(topic) +
+		4 + // partition array length
+		4 + // partition
+		8 + // offset
+		4 // max bytes
+
+	h.writeTo(wb)
+	wb.writeInt32(-1) // replica ID
+	wb.writeInt32(milliseconds(maxWait))
+	wb.writeInt32(int32(minBytes))
+
+	// topic array
+	wb.writeArrayLen(1)
+	wb.writeString(topic)
+
+	// partition array
+	wb.writeArrayLen(1)
+	wb.writeInt32(partition)
+	wb.writeInt64(offset)
+	wb.writeInt32(int32(maxBytes))
+
+	return wb.Flush()
+}
+
+func (wb *writeBuffer) writeFetchRequestV5(correlationID int32, clientID, topic string, partition int32, offset int64, minBytes, maxBytes int, maxWait time.Duration, isolationLevel int8) error {
+	h := requestHeader{
+		ApiKey:        int16(fetch),
+		ApiVersion:    int16(v5),
+		CorrelationID: correlationID,
+		ClientID:      clientID,
+	}
+	h.Size = (h.size() - 4) +
+		4 + // replica ID
+		4 + // max wait time
+		4 + // min bytes
+		4 + // max bytes
+		1 + // isolation level
+		4 + // topic array length
+		sizeofString(topic) +
+		4 + // partition array length
+		4 + // partition
+		8 + // offset
+		8 + // log start offset
+		4 // max bytes
+
+	h.writeTo(wb)
+	wb.writeInt32(-1) // replica ID
+	wb.writeInt32(milliseconds(maxWait))
+	wb.writeInt32(int32(minBytes))
+	wb.writeInt32(int32(maxBytes))
+	wb.writeInt8(isolationLevel) // isolation level 0 - read uncommitted
+
+	// topic array
+	wb.writeArrayLen(1)
+	wb.writeString(topic)
+
+	// partition array
+	wb.writeArrayLen(1)
+	wb.writeInt32(partition)
+	wb.writeInt64(offset)
+	wb.writeInt64(int64(0)) // log start offset, only used when the request is sent by a follower
+	wb.writeInt32(int32(maxBytes))
+
+	return wb.Flush()
+}
+
+func (wb *writeBuffer) writeFetchRequestV10(correlationID int32, clientID, topic string, partition int32, offset int64, minBytes, maxBytes int, maxWait time.Duration, isolationLevel int8) error {
+	h := requestHeader{
+		ApiKey:        int16(fetch),
+		ApiVersion:    int16(v10),
+		CorrelationID: correlationID,
+		ClientID:      clientID,
+	}
+	h.Size = (h.size() - 4) +
+		4 + // replica ID
+		4 + // max wait time
+		4 + // min bytes
+		4 + // max bytes
+		1 + // isolation level
+		4 + // session ID
+		4 + // session epoch
+		4 + // topic array length
+		sizeofString(topic) +
+		4 + // partition array length
+		4 + // partition
+		4 + // current leader epoch
+		8 + // fetch offset
+		8 + // log start offset
+		4 + // partition max bytes
+		4 // forgotten topics data
+
+	h.writeTo(wb)
+	wb.writeInt32(-1) // replica ID
+	wb.writeInt32(milliseconds(maxWait))
+	wb.writeInt32(int32(minBytes))
+	wb.writeInt32(int32(maxBytes))
+	wb.writeInt8(isolationLevel) // isolation level 0 - read uncommitted
+	wb.writeInt32(0)  //FIXME
+	wb.writeInt32(-1) //FIXME
+
+	// topic array
+	wb.writeArrayLen(1)
+	wb.writeString(topic)
+
+	// partition array
+	wb.writeArrayLen(1)
+	wb.writeInt32(partition)
+	wb.writeInt32(-1) //FIXME
+	wb.writeInt64(offset)
+	wb.writeInt64(int64(0)) // log start offset, only used when the request is sent by a follower
+	wb.writeInt32(int32(maxBytes))
+
+	// forgotten topics array
+	wb.writeArrayLen(0) // forgotten topics not supported yet
+
+	return wb.Flush()
+}
+
+func (wb *writeBuffer) writeListOffsetRequestV1(correlationID int32, clientID, topic string, partition int32, time int64) error {
+	h := requestHeader{
+		ApiKey:        int16(listOffsets),
+		ApiVersion:    int16(v1),
+		CorrelationID: correlationID,
+		ClientID:      clientID,
+	}
+	h.Size = (h.size() - 4) +
+		4 + // replica ID
+		4 + // topic array length
+		sizeofString(topic) + // topic
+		4 + // partition array length
+		4 + // partition
+		8 // time
+
+	h.writeTo(wb)
+	wb.writeInt32(-1) // replica ID
+
+	// topic array
+	wb.writeArrayLen(1)
+	wb.writeString(topic)
+
+	// partition array
+	wb.writeArrayLen(1)
+	wb.writeInt32(partition)
+	wb.writeInt64(time)
+
+	return wb.Flush()
+}
+
+func (wb *writeBuffer) writeProduceRequestV2(codec CompressionCodec, correlationID int32, clientID, topic string, partition int32, timeout time.Duration, requiredAcks int16, msgs ...Message) (err error) {
+	var size int32
+	var attributes int8
+	var compressed *bytes.Buffer
+
+	if codec == nil {
+		size = messageSetSize(msgs...)
+	} else {
+		compressed, attributes, size, err = compressMessageSet(codec, msgs...)
+ if err != nil { + return + } + msgs = []Message{{Value: compressed.Bytes()}} + } + + h := requestHeader{ + ApiKey: int16(produce), + ApiVersion: int16(v2), + CorrelationID: correlationID, + ClientID: clientID, + } + h.Size = (h.size() - 4) + + 2 + // required acks + 4 + // timeout + 4 + // topic array length + sizeofString(topic) + // topic + 4 + // partition array length + 4 + // partition + 4 + // message set size + size + + h.writeTo(wb) + wb.writeInt16(requiredAcks) // required acks + wb.writeInt32(milliseconds(timeout)) + + // topic array + wb.writeArrayLen(1) + wb.writeString(topic) + + // partition array + wb.writeArrayLen(1) + wb.writeInt32(partition) + + wb.writeInt32(size) + cw := &crc32Writer{table: crc32.IEEETable} + + for _, msg := range msgs { + wb.writeMessage(msg.Offset, attributes, msg.Time, msg.Key, msg.Value, cw) + } + + releaseBuffer(compressed) + return wb.Flush() +} + +func (wb *writeBuffer) writeProduceRequestV3(correlationID int32, clientID, topic string, partition int32, timeout time.Duration, requiredAcks int16, transactionalID *string, recordBatch *recordBatch) (err error) { + + h := requestHeader{ + ApiKey: int16(produce), + ApiVersion: int16(v3), + CorrelationID: correlationID, + ClientID: clientID, + } + + h.Size = (h.size() - 4) + + sizeofNullableString(transactionalID) + + 2 + // required acks + 4 + // timeout + 4 + // topic array length + sizeofString(topic) + // topic + 4 + // partition array length + 4 + // partition + 4 + // message set size + recordBatch.size + + h.writeTo(wb) + wb.writeNullableString(transactionalID) + wb.writeInt16(requiredAcks) // required acks + wb.writeInt32(milliseconds(timeout)) + + // topic array + wb.writeArrayLen(1) + wb.writeString(topic) + + // partition array + wb.writeArrayLen(1) + wb.writeInt32(partition) + + recordBatch.writeTo(wb) + + return wb.Flush() +} + +func (wb *writeBuffer) writeProduceRequestV7(correlationID int32, clientID, topic string, partition int32, timeout time.Duration, requiredAcks int16, transactionalID *string, recordBatch *recordBatch) (err error) { + + h := requestHeader{ + ApiKey: int16(produce), + ApiVersion: int16(v7), + CorrelationID: correlationID, + ClientID: clientID, + } + h.Size = (h.size() - 4) + + sizeofNullableString(transactionalID) + + 2 + // required acks + 4 + // timeout + 4 + // topic array length + sizeofString(topic) + // topic + 4 + // partition array length + 4 + // partition + 4 + // message set size + recordBatch.size + + h.writeTo(wb) + wb.writeNullableString(transactionalID) + wb.writeInt16(requiredAcks) // required acks + wb.writeInt32(milliseconds(timeout)) + + // topic array + wb.writeArrayLen(1) + wb.writeString(topic) + + // partition array + wb.writeArrayLen(1) + wb.writeInt32(partition) + + recordBatch.writeTo(wb) + + return wb.Flush() +} + +func (wb *writeBuffer) writeRecordBatch(attributes int16, size int32, count int, baseTime, lastTime time.Time, write func(*writeBuffer)) { + var ( + baseTimestamp = timestamp(baseTime) + lastTimestamp = timestamp(lastTime) + lastOffsetDelta = int32(count - 1) + producerID = int64(-1) // default producer id for now + producerEpoch = int16(-1) // default producer epoch for now + baseSequence = int32(-1) // default base sequence + recordCount = int32(count) // record count + writerBackup = wb.w + ) + + // dry run to compute the checksum + cw := &crc32Writer{table: crc32.MakeTable(crc32.Castagnoli)} + wb.w = cw + cw.writeInt16(attributes) // attributes, timestamp type 0 - create time, not part of a transaction, no control messages + 
cw.writeInt32(lastOffsetDelta)
+	cw.writeInt64(baseTimestamp)
+	cw.writeInt64(lastTimestamp)
+	cw.writeInt64(producerID)
+	cw.writeInt16(producerEpoch)
+	cw.writeInt32(baseSequence)
+	cw.writeInt32(recordCount)
+	write(wb)
+	wb.w = writerBackup
+
+	// actual write to the output buffer
+	wb.writeInt64(int64(0))
+	wb.writeInt32(int32(size - 12)) // 12 = batch length + base offset sizes
+	wb.writeInt32(-1)               // partition leader epoch
+	wb.writeInt8(2)                 // magic byte
+	wb.writeInt32(int32(cw.crc32))
+
+	wb.writeInt16(attributes)
+	wb.writeInt32(lastOffsetDelta)
+	wb.writeInt64(baseTimestamp)
+	wb.writeInt64(lastTimestamp)
+	wb.writeInt64(producerID)
+	wb.writeInt16(producerEpoch)
+	wb.writeInt32(baseSequence)
+	wb.writeInt32(recordCount)
+	write(wb)
+}
+
+func compressMessageSet(codec CompressionCodec, msgs ...Message) (compressed *bytes.Buffer, attributes int8, size int32, err error) {
+	compressed = acquireBuffer()
+	compressor := codec.NewWriter(compressed)
+	wb := &writeBuffer{w: compressor}
+	cw := &crc32Writer{table: crc32.IEEETable}
+
+	for offset, msg := range msgs {
+		wb.writeMessage(int64(offset), 0, msg.Time, msg.Key, msg.Value, cw)
+	}
+
+	if err = compressor.Close(); err != nil {
+		releaseBuffer(compressed)
+		return
+	}
+
+	attributes = codec.Code()
+	size = messageSetSize(Message{Value: compressed.Bytes()})
+	return
+}
+
+func (wb *writeBuffer) writeMessage(offset int64, attributes int8, time time.Time, key, value []byte, cw *crc32Writer) {
+	const magicByte = 1 // compatible with kafka 0.10.0.0+
+
+	timestamp := timestamp(time)
+	size := messageSize(key, value)
+
+	// dry run to compute the checksum
+	cw.crc32 = 0
+	cw.writeInt8(magicByte)
+	cw.writeInt8(attributes)
+	cw.writeInt64(timestamp)
+	cw.writeBytes(key)
+	cw.writeBytes(value)
+
+	// actual write to the output buffer
+	wb.writeInt64(offset)
+	wb.writeInt32(size)
+	wb.writeInt32(int32(cw.crc32))
+	wb.writeInt8(magicByte)
+	wb.writeInt8(attributes)
+	wb.writeInt64(timestamp)
+	wb.writeBytes(key)
+	wb.writeBytes(value)
+}
+
+// Messages with magic >= 2 are called records. This method writes messages using message format 2.
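+//
+// Record sizes below are varint-encoded with zig-zag encoding, as in the Kafka
+// protocol. As a worked example (illustrative): a length of 3 encodes as
+// (3<<1)^(3>>63) = 6 = 0x06, and a length of 5 encodes as 10 = 0x0a.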
+func (wb *writeBuffer) writeRecord(attributes int8, baseTime time.Time, offset int64, msg Message) {
+	timestampDelta := msg.Time.Sub(baseTime)
+	offsetDelta := int64(offset)
+
+	wb.writeVarInt(int64(recordSize(&msg, timestampDelta, offsetDelta)))
+	wb.writeInt8(attributes)
+	wb.writeVarInt(int64(milliseconds(timestampDelta)))
+	wb.writeVarInt(offsetDelta)
+
+	wb.writeVarBytes(msg.Key)
+	wb.writeVarBytes(msg.Value)
+	wb.writeVarArray(len(msg.Headers), func(i int) {
+		h := &msg.Headers[i]
+		wb.writeVarString(h.Key)
+		wb.writeVarBytes(h.Value)
+	})
+}
+
+func varIntLen(i int64) int {
+	u := uint64((i << 1) ^ (i >> 63)) // zig-zag encoding
+	n := 0
+
+	for u >= 0x80 {
+		u >>= 7
+		n++
+	}
+
+	return n + 1
+}
+
+func varBytesLen(b []byte) int {
+	return varIntLen(int64(len(b))) + len(b)
+}
+
+func varStringLen(s string) int {
+	return varIntLen(int64(len(s))) + len(s)
+}
+
+func varArrayLen(n int, f func(int) int) int {
+	size := varIntLen(int64(n))
+	for i := 0; i < n; i++ {
+		size += f(i)
+	}
+	return size
+}
+
+func messageSize(key, value []byte) int32 {
+	return 4 + // crc
+		1 + // magic byte
+		1 + // attributes
+		8 + // timestamp
+		sizeofBytes(key) +
+		sizeofBytes(value)
+}
+
+func messageSetSize(msgs ...Message) (size int32) {
+	for _, msg := range msgs {
+		size += 8 + // offset
+			4 + // message size
+			4 + // crc
+			1 + // magic byte
+			1 + // attributes
+			8 + // timestamp
+			sizeofBytes(msg.Key) +
+			sizeofBytes(msg.Value)
+	}
+	return
+}
diff --git a/vendor/github.com/segmentio/kafka-go/writer.go b/vendor/github.com/segmentio/kafka-go/writer.go
new file mode 100644
index 00000000000..ef89051ca09
--- /dev/null
+++ b/vendor/github.com/segmentio/kafka-go/writer.go
@@ -0,0 +1,1309 @@
+package kafka
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"io"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	metadataAPI "github.com/segmentio/kafka-go/protocol/metadata"
+)
+
+// The Writer type provides the implementation of a producer of kafka messages
+// that automatically distributes messages across partitions of a single topic
+// using a configurable balancing policy.
+//
+// Writers manage the dispatch of messages across partitions of the topic they
+// are configured to write to using a Balancer, and aggregate batches to
+// optimize the writes to kafka.
+//
+// Writers may be configured to be used synchronously or asynchronously. When
+// used synchronously, calls to WriteMessages block until the messages have been
+// written to kafka. In this mode, the program should inspect the error returned
+// by the function and test if it is an instance of kafka.WriteErrors in order to
+// identify which messages have succeeded or failed, for example:
+//
+//	// Construct a synchronous writer (the default mode).
+//	w := &kafka.Writer{
+//		Addr:         kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+//		Topic:        "topic-A",
+//		RequiredAcks: kafka.RequireAll,
+//	}
+//
+//	...
+//
+//	// Passing a context can prevent the operation from blocking indefinitely.
+//	switch err := w.WriteMessages(ctx, msgs...).(type) {
+//	case nil:
+//	case kafka.WriteErrors:
+//		for i := range msgs {
+//			if err[i] != nil {
+//				// handle the error writing msgs[i]
+//				...
+//			}
+//		}
+//	default:
+//		// handle other errors
+//		...
+//	}
+//
+// In asynchronous mode, the program may configure a completion handler on the
+// writer to receive notifications of messages being written to kafka:
+//
+//	w := &kafka.Writer{
+//		Addr:         kafka.TCP("localhost:9092", "localhost:9093", "localhost:9094"),
+//		Topic:        "topic-A",
+//		RequiredAcks: kafka.RequireAll,
+//		Async:        true, // make the writer asynchronous
+//		Completion: func(messages []kafka.Message, err error) {
+//			...
+//		},
+//	}
+//
+//	...
+//
+//	// Because the writer is asynchronous, there is no need for the context to
+//	// be cancelled; the call will never block.
+//	if err := w.WriteMessages(context.Background(), msgs...); err != nil {
+//		// Only validation errors would be reported in this case.
+//		...
+//	}
+//
+// Methods of Writer are safe to use concurrently from multiple goroutines,
+// however the writer configuration should not be modified after first use.
+type Writer struct {
+	// Address of the kafka cluster that this writer is configured to send
+	// messages to.
+	//
+	// This field is required; attempting to write messages to a writer with a
+	// nil address will error.
+	Addr net.Addr
+
+	// Topic is the name of the topic that the writer will produce messages to.
+	//
+	// Setting this field and setting Topic on individual Messages are mutually
+	// exclusive options. If you set Topic here, you must not set Topic for any
+	// produced Message. Otherwise, if you do not set Topic, every Message must
+	// have Topic specified.
+	Topic string
+
+	// The balancer used to distribute messages across partitions.
+	//
+	// The default is to use a round-robin distribution.
+	Balancer Balancer
+
+	// Limit on how many attempts will be made to deliver a message.
+	//
+	// The default is to try at most 10 times.
+	MaxAttempts int
+
+	// WriteBackoffMin optionally sets the smallest amount of time the writer waits before
+	// it attempts to write a batch of messages
+	//
+	// Default: 100ms
+	WriteBackoffMin time.Duration
+
+	// WriteBackoffMax optionally sets the maximum amount of time the writer waits before
+	// it attempts to write a batch of messages
+	//
+	// Default: 1s
+	WriteBackoffMax time.Duration
+
+	// Limit on how many messages will be buffered before being sent to a
+	// partition.
+	//
+	// The default is to use a target batch size of 100 messages.
+	BatchSize int
+
+	// Limit the maximum size of a request in bytes before being sent to
+	// a partition.
+	//
+	// The default is to use a kafka default value of 1048576.
+	BatchBytes int64
+
+	// Time limit on how often incomplete message batches will be flushed to
+	// kafka.
+	//
+	// The default is to flush at least every second.
+	BatchTimeout time.Duration
+
+	// Timeout for read operations performed by the Writer.
+	//
+	// Defaults to 10 seconds.
+	ReadTimeout time.Duration
+
+	// Timeout for write operation performed by the Writer.
+	//
+	// Defaults to 10 seconds.
+	WriteTimeout time.Duration
+
+	// Number of acknowledges from partition replicas required before receiving
+	// a response to a produce request, the following values are supported:
+	//
+	//	RequireNone (0)  fire-and-forget, do not wait for acknowledgements from the cluster
+	//	RequireOne  (1)  wait for the leader to acknowledge the writes
+	//	RequireAll  (-1) wait for the full ISR to acknowledge the writes
+	//
+	// Defaults to RequireNone.
+	RequiredAcks RequiredAcks
+
+	// Setting this flag to true causes the WriteMessages method to never block.
+	// It also means that errors are ignored since the caller will not receive
+	// the returned value. Use this only if you don't care about guarantees of
+	// whether the messages were written to kafka.
+	//
+	// Defaults to false.
+	Async bool
+
+	// An optional function called when the writer succeeds or fails the
+	// delivery of messages to a kafka partition. When writing the messages
+	// fails, the `err` parameter will be non-nil.
+	//
+	// The messages that the Completion function is called with have their
+	// topic, partition, offset, and time set based on the Produce responses
+	// received from kafka. All messages passed to a call to the function have
+	// been written to the same partition. The keys and values of messages are
+	// referencing the original byte slices carried by messages in the calls to
+	// WriteMessages.
+	//
+	// The function is called from goroutines started by the writer. Calls to
+	// Close will block on the Completion function calls. When the Writer is
+	// not writing asynchronously, the WriteMessages call will also block on
+	// Completion function, which is a useful guarantee if the byte slices
+	// for the message keys and values are intended to be reused after the
+	// WriteMessages call returned.
+	//
+	// If a completion function panics, the program terminates because the
+	// panic is not recovered by the writer and bubbles up to the top of the
+	// goroutine's call stack.
+	Completion func(messages []Message, err error)
+
+	// Compression sets the compression codec to be used to compress messages.
+	Compression Compression
+
+	// If not nil, specifies a logger used to report internal changes within the
+	// writer.
+	Logger Logger
+
+	// ErrorLogger is the logger used to report errors. If nil, the writer falls
+	// back to using Logger instead.
+	ErrorLogger Logger
+
+	// A transport used to send messages to kafka clusters.
+	//
+	// If nil, DefaultTransport is used.
+	Transport RoundTripper
+
+	// AllowAutoTopicCreation notifies the writer to create the topic if missing.
+	AllowAutoTopicCreation bool
+
+	// Manages the current set of partition-topic writers.
+	group   sync.WaitGroup
+	mutex   sync.Mutex
+	closed  bool
+	writers map[topicPartition]*partitionWriter
+
+	// writer stats are all made of atomic values, no need for synchronization.
+	// Use a pointer to ensure 64-bit alignment of the values. The once value is
+	// used to lazily create the value when first used, allowing programs to use
+	// the zero value of Writer.
+	once sync.Once
+	*writerStats
+
+	// If no balancer is configured, the writer uses this one. RoundRobin values
+	// are safe to use concurrently from multiple goroutines, there is no need
+	// for extra synchronization to access this field.
+	roundRobin RoundRobin
+
+	// non-nil when a transport was created by NewWriter, remove in 1.0.
+	transport *Transport
+}
+
+// WriterConfig is a configuration type used to create new instances of Writer.
+//
+// DEPRECATED: writer values should be configured directly by assigning their
+// exported fields. This type is kept for backward compatibility, and will be
+// removed in version 1.0.
+type WriterConfig struct {
+	// The list of brokers used to discover the partitions available on the
+	// kafka cluster.
+	//
+	// This field is required; attempting to create a writer with an empty list
+	// of brokers will panic.
+	Brokers []string
+
+	// The topic that the writer will produce messages to.
+	//
+	// If provided, this will be used to set the topic for all produced messages.
+	// If not provided, each Message must specify a topic for itself.
+	// The two settings are mutually exclusive; if both are set, the Writer
+	// will return an error.
+	Topic string
+
+	// The dialer used by the writer to establish connections to the kafka
+	// cluster.
+	//
+	// If nil, the default dialer is used instead.
+	Dialer *Dialer
+
+	// The balancer used to distribute messages across partitions.
+	//
+	// The default is to use a round-robin distribution.
+	Balancer Balancer
+
+	// Limit on how many attempts will be made to deliver a message.
+	//
+	// The default is to try at most 10 times.
+	MaxAttempts int
+
+	// DEPRECATED: in versions prior to 0.4, the writer used channels internally
+	// to dispatch messages to partitions. This has been replaced by an in-memory
+	// aggregation of batches which uses shared state instead of message passing,
+	// making this option unnecessary.
+	QueueCapacity int
+
+	// Limit on how many messages will be buffered before being sent to a
+	// partition.
+	//
+	// The default is to use a target batch size of 100 messages.
+	BatchSize int
+
+	// Limit the maximum size of a request in bytes before being sent to
+	// a partition.
+	//
+	// The default is to use a kafka default value of 1048576.
+	BatchBytes int
+
+	// Time limit on how often incomplete message batches will be flushed to
+	// kafka.
+	//
+	// The default is to flush at least every second.
+	BatchTimeout time.Duration
+
+	// Timeout for read operations performed by the Writer.
+	//
+	// Defaults to 10 seconds.
+	ReadTimeout time.Duration
+
+	// Timeout for write operation performed by the Writer.
+	//
+	// Defaults to 10 seconds.
+	WriteTimeout time.Duration
+
+	// DEPRECATED: in versions prior to 0.4, the writer used to maintain a cache
+	// of the topic layout. With the change to use a transport to manage connections,
+	// the responsibility of syncing the cluster layout has been delegated to the
+	// transport.
+	RebalanceInterval time.Duration
+
+	// DEPRECATED: in versions prior to 0.4, the writer used to manage connections
+	// to the kafka cluster directly. With the change to use a transport to manage
+	// connections, the writer has no connections to manage directly anymore.
+	IdleConnTimeout time.Duration
+
+	// Number of acknowledges from partition replicas required before receiving
+	// a response to a produce request. The default is -1, which means to wait for
+	// all replicas, and a value above 0 is required to indicate how many replicas
+	// should acknowledge a message to be considered successful.
+	RequiredAcks int
+
+	// Setting this flag to true causes the WriteMessages method to never block.
+	// It also means that errors are ignored since the caller will not receive
+	// the returned value. Use this only if you don't care about guarantees of
+	// whether the messages were written to kafka.
+	Async bool
+
+	// CompressionCodec sets the codec to be used to compress Kafka messages.
+	CompressionCodec
+
+	// If not nil, specifies a logger used to report internal changes within the
+	// writer.
+	Logger Logger
+
+	// ErrorLogger is the logger used to report errors. If nil, the writer falls
+	// back to using Logger instead.
+	ErrorLogger Logger
+}
+
+type topicPartition struct {
+	topic     string
+	partition int32
+}
+
+// Validate method validates WriterConfig properties.
+func (config *WriterConfig) Validate() error { + if len(config.Brokers) == 0 { + return errors.New("cannot create a kafka writer with an empty list of brokers") + } + return nil +} + +// WriterStats is a data structure returned by a call to Writer.Stats that +// exposes details about the behavior of the writer. +type WriterStats struct { + Writes int64 `metric:"kafka.writer.write.count" type:"counter"` + Messages int64 `metric:"kafka.writer.message.count" type:"counter"` + Bytes int64 `metric:"kafka.writer.message.bytes" type:"counter"` + Errors int64 `metric:"kafka.writer.error.count" type:"counter"` + + BatchTime DurationStats `metric:"kafka.writer.batch.seconds"` + BatchQueueTime DurationStats `metric:"kafka.writer.batch.queue.seconds"` + WriteTime DurationStats `metric:"kafka.writer.write.seconds"` + WaitTime DurationStats `metric:"kafka.writer.wait.seconds"` + Retries int64 `metric:"kafka.writer.retries.count" type:"counter"` + BatchSize SummaryStats `metric:"kafka.writer.batch.size"` + BatchBytes SummaryStats `metric:"kafka.writer.batch.bytes"` + + MaxAttempts int64 `metric:"kafka.writer.attempts.max" type:"gauge"` + WriteBackoffMin time.Duration `metric:"kafka.writer.backoff.min" type:"gauge"` + WriteBackoffMax time.Duration `metric:"kafka.writer.backoff.max" type:"gauge"` + MaxBatchSize int64 `metric:"kafka.writer.batch.max" type:"gauge"` + BatchTimeout time.Duration `metric:"kafka.writer.batch.timeout" type:"gauge"` + ReadTimeout time.Duration `metric:"kafka.writer.read.timeout" type:"gauge"` + WriteTimeout time.Duration `metric:"kafka.writer.write.timeout" type:"gauge"` + RequiredAcks int64 `metric:"kafka.writer.acks.required" type:"gauge"` + Async bool `metric:"kafka.writer.async" type:"gauge"` + + Topic string `tag:"topic"` + + // DEPRECATED: these fields will only be reported for backward compatibility + // if the Writer was constructed with NewWriter. + Dials int64 `metric:"kafka.writer.dial.count" type:"counter"` + DialTime DurationStats `metric:"kafka.writer.dial.seconds"` + + // DEPRECATED: these fields were meaningful prior to kafka-go 0.4, changes + // to the internal implementation and the introduction of the transport type + // made them unnecessary. + // + // The values will be zero but are left for backward compatibility to avoid + // breaking programs that used these fields. + Rebalances int64 + RebalanceInterval time.Duration + QueueLength int64 + QueueCapacity int64 + ClientID string +} + +// writerStats is a struct that contains statistics on a writer. +// +// Since atomic is used to mutate the statistics the values must be 64-bit aligned. +// This is easily accomplished by always allocating this struct directly, (i.e. using a pointer to the struct). +// See https://golang.org/pkg/sync/atomic/#pkg-note-BUG +type writerStats struct { + dials counter + writes counter + messages counter + bytes counter + errors counter + dialTime summary + batchTime summary + batchQueueTime summary + writeTime summary + waitTime summary + retries counter + batchSize summary + batchSizeBytes summary +} + +// NewWriter creates and returns a new Writer configured with config. +// +// DEPRECATED: Writer value can be instantiated and configured directly, +// this function is retained for backward compatibility and will be removed +// in version 1.0. 
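+//
+// As a rough migration sketch (illustrative; note that NewWriter historically
+// defaulted RequiredAcks to RequireAll when left unset, while a zero-value
+// Writer defaults to RequireNone):
+//
+//	// Legacy construction:
+//	w := kafka.NewWriter(kafka.WriterConfig{
+//		Brokers: []string{"localhost:9092"},
+//		Topic:   "topic-A",
+//	})
+//
+//	// Direct construction:
+//	w := &kafka.Writer{
+//		Addr:         kafka.TCP("localhost:9092"),
+//		Topic:        "topic-A",
+//		RequiredAcks: kafka.RequireAll,
+//	}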
+func NewWriter(config WriterConfig) *Writer { + if err := config.Validate(); err != nil { + panic(err) + } + + if config.Dialer == nil { + config.Dialer = DefaultDialer + } + + if config.Balancer == nil { + config.Balancer = &RoundRobin{} + } + + // Converts the pre-0.4 Dialer API into a Transport. + kafkaDialer := DefaultDialer + if config.Dialer != nil { + kafkaDialer = config.Dialer + } + + dialer := (&net.Dialer{ + Timeout: kafkaDialer.Timeout, + Deadline: kafkaDialer.Deadline, + LocalAddr: kafkaDialer.LocalAddr, + DualStack: kafkaDialer.DualStack, + FallbackDelay: kafkaDialer.FallbackDelay, + KeepAlive: kafkaDialer.KeepAlive, + }) + + var resolver Resolver + if r, ok := kafkaDialer.Resolver.(*net.Resolver); ok { + dialer.Resolver = r + } else { + resolver = kafkaDialer.Resolver + } + + stats := new(writerStats) + // For backward compatibility with the pre-0.4 APIs, support custom + // resolvers by wrapping the dial function. + dial := func(ctx context.Context, network, addr string) (net.Conn, error) { + start := time.Now() + defer func() { + stats.dials.observe(1) + stats.dialTime.observe(int64(time.Since(start))) + }() + address, err := lookupHost(ctx, addr, resolver) + if err != nil { + return nil, err + } + return dialer.DialContext(ctx, network, address) + } + + idleTimeout := config.IdleConnTimeout + if idleTimeout == 0 { + // Historical default value of WriterConfig.IdleTimeout, 9 minutes seems + // like it is way too long when there is no ping mechanism in the kafka + // protocol. + idleTimeout = 9 * time.Minute + } + + metadataTTL := config.RebalanceInterval + if metadataTTL == 0 { + // Historical default value of WriterConfig.RebalanceInterval. + metadataTTL = 15 * time.Second + } + + transport := &Transport{ + Dial: dial, + SASL: kafkaDialer.SASLMechanism, + TLS: kafkaDialer.TLS, + ClientID: kafkaDialer.ClientID, + IdleTimeout: idleTimeout, + MetadataTTL: metadataTTL, + } + + w := &Writer{ + Addr: TCP(config.Brokers...), + Topic: config.Topic, + MaxAttempts: config.MaxAttempts, + BatchSize: config.BatchSize, + Balancer: config.Balancer, + BatchBytes: int64(config.BatchBytes), + BatchTimeout: config.BatchTimeout, + ReadTimeout: config.ReadTimeout, + WriteTimeout: config.WriteTimeout, + RequiredAcks: RequiredAcks(config.RequiredAcks), + Async: config.Async, + Logger: config.Logger, + ErrorLogger: config.ErrorLogger, + Transport: transport, + transport: transport, + writerStats: stats, + } + + if config.RequiredAcks == 0 { + // Historically the writers created by NewWriter have used "all" as the + // default value when 0 was specified. + w.RequiredAcks = RequireAll + } + + if config.CompressionCodec != nil { + w.Compression = Compression(config.CompressionCodec.Code()) + } + + return w +} + +// enter is called by WriteMessages to indicate that a new inflight operation +// has started, which helps synchronize with Close and ensure that the method +// does not return until all inflight operations were completed. +func (w *Writer) enter() bool { + w.mutex.Lock() + defer w.mutex.Unlock() + if w.closed { + return false + } + w.group.Add(1) + return true +} + +// leave is called by WriteMessages to indicate that the inflight operation has +// completed. +func (w *Writer) leave() { w.group.Done() } + +// spawn starts a new asynchronous operation on the writer. This method is used +// instead of starting goroutines inline to help manage the state of the +// writer's wait group. 
The wait group is used to block Close calls until all
+// inflight operations have completed, therefore automatically including those
+// started with calls to spawn.
+func (w *Writer) spawn(f func()) {
+	w.group.Add(1)
+	go func() {
+		defer w.group.Done()
+		f()
+	}()
+}
+
+// Close flushes pending writes, and waits for all writes to complete before
+// returning. Calling Close also prevents new writes from being submitted to
+// the writer, further calls to WriteMessages and the like will fail with
+// io.ErrClosedPipe.
+func (w *Writer) Close() error {
+	w.mutex.Lock()
+	// Marking the writer as closed here causes future calls to WriteMessages to
+	// fail with io.ErrClosedPipe. Mutation of this field is synchronized on the
+	// writer's mutex to ensure that no more increments of the wait group are
+	// performed afterwards (which could otherwise race with the Wait below).
+	w.closed = true
+
+	// close all writers to trigger any pending batches
+	for _, writer := range w.writers {
+		writer.close()
+	}
+
+	for partition := range w.writers {
+		delete(w.writers, partition)
+	}
+
+	w.mutex.Unlock()
+	w.group.Wait()
+
+	if w.transport != nil {
+		w.transport.CloseIdleConnections()
+	}
+
+	return nil
+}
+
+// WriteMessages writes a batch of messages to the kafka topic configured on this
+// writer.
+//
+// Unless the writer was configured to write messages asynchronously, the method
+// blocks until all messages have been written, or until the maximum number of
+// attempts was reached.
+//
+// When sending synchronously and the writer's batch size is configured to be
+// greater than 1, this method blocks until either a full batch can be assembled
+// or the batch timeout is reached. The batch size and timeouts are evaluated
+// per partition, so the choice of Balancer can also influence the flushing
+// behavior. For example, the Hash balancer will require on average N * batch
+// size messages to trigger a flush where N is the number of partitions. The
+// best way to achieve good batching behavior is to share one Writer amongst
+// multiple goroutines.
+//
+// When the method returns an error, it may be of type kafka.WriteErrors to allow
+// the caller to determine the status of each message.
+//
+// The context passed as the first argument may also be used to asynchronously
+// cancel the operation. Note that in this case there are no guarantees made on
+// whether messages were written to kafka; they might also still be written
+// after this method has already returned, therefore it is important to not
+// modify byte slices of passed messages if WriteMessages returned early due
+// to a canceled context.
+// The program should assume that the whole batch failed and re-write the
+// messages later (which could then cause duplicates).
+func (w *Writer) WriteMessages(ctx context.Context, msgs ...Message) error {
+	if w.Addr == nil {
+		return errors.New("kafka.(*Writer).WriteMessages: cannot create a kafka writer with a nil address")
+	}
+
+	if !w.enter() {
+		return io.ErrClosedPipe
+	}
+	defer w.leave()
+
+	if len(msgs) == 0 {
+		return nil
+	}
+
+	balancer := w.balancer()
+	batchBytes := w.batchBytes()
+
+	for i := range msgs {
+		n := int64(msgs[i].totalSize())
+		if n > batchBytes {
+			// This error is left for backward compatibility with historical
+			// behavior, but it can yield O(N^2) behaviors. The expectations
+			// are that the program will check if WriteMessages returned a
+			// MessageTooLargeError, discard the message that exceeded
+			// the maximum size, and try again.
+			return messageTooLarge(msgs, i)
+		}
+	}
+
+	// We use int32 here to halve the memory footprint (compared to using int
+	// on 64-bit architectures). We map lists of the message indexes instead
+	// of the message values for the same reason: an int32 is 4 bytes, vs a full
+	// Message value which is 100+ bytes, contains pointers, and contributes
+	// to increasing GC work.
+	assignments := make(map[topicPartition][]int32)
+
+	for i, msg := range msgs {
+		topic, err := w.chooseTopic(msg)
+		if err != nil {
+			return err
+		}
+
+		numPartitions, err := w.partitions(ctx, topic)
+		if err != nil {
+			return err
+		}
+
+		partition := balancer.Balance(msg, loadCachedPartitions(numPartitions)...)
+
+		key := topicPartition{
+			topic:     topic,
+			partition: int32(partition),
+		}
+
+		assignments[key] = append(assignments[key], int32(i))
+	}
+
+	batches := w.batchMessages(msgs, assignments)
+	if w.Async {
+		return nil
+	}
+
+	done := ctx.Done()
+	hasErrors := false
+	for batch := range batches {
+		select {
+		case <-done:
+			return ctx.Err()
+		case <-batch.done:
+			if batch.err != nil {
+				hasErrors = true
+			}
+		}
+	}
+
+	if !hasErrors {
+		return nil
+	}
+
+	werr := make(WriteErrors, len(msgs))
+
+	for batch, indexes := range batches {
+		for _, i := range indexes {
+			werr[i] = batch.err
+		}
+	}
+	return werr
+}
+
+func (w *Writer) batchMessages(messages []Message, assignments map[topicPartition][]int32) map[*writeBatch][]int32 {
+	var batches map[*writeBatch][]int32
+	if !w.Async {
+		batches = make(map[*writeBatch][]int32, len(assignments))
+	}
+
+	w.mutex.Lock()
+	defer w.mutex.Unlock()
+
+	if w.writers == nil {
+		w.writers = map[topicPartition]*partitionWriter{}
+	}
+
+	for key, indexes := range assignments {
+		writer := w.writers[key]
+		if writer == nil {
+			writer = newPartitionWriter(w, key)
+			w.writers[key] = writer
+		}
+		wbatches := writer.writeMessages(messages, indexes)
+
+		for batch, idxs := range wbatches {
+			batches[batch] = idxs
+		}
+	}
+
+	return batches
+}
+
+func (w *Writer) produce(key topicPartition, batch *writeBatch) (*ProduceResponse, error) {
+	timeout := w.writeTimeout()
+
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
+
+	return w.client(timeout).Produce(ctx, &ProduceRequest{
+		Partition:    int(key.partition),
+		Topic:        key.topic,
+		RequiredAcks: w.RequiredAcks,
+		Compression:  w.Compression,
+		Records: &writerRecords{
+			msgs: batch.msgs,
+		},
+	})
+}
+
+func (w *Writer) partitions(ctx context.Context, topic string) (int, error) {
+	client := w.client(w.readTimeout())
+	// Here we use the transport directly as an optimization to avoid the
+	// construction of temporary request and response objects made by the
+	// (*Client).Metadata API.
+	//
+	// It is expected that the transport will optimize this request by
+	// caching recent results (the kafka.Transport type does).
+	r, err := client.transport().RoundTrip(ctx, client.Addr, &metadataAPI.Request{
+		TopicNames:             []string{topic},
+		AllowAutoTopicCreation: w.AllowAutoTopicCreation,
+	})
+	if err != nil {
+		return 0, err
+	}
+	for _, t := range r.(*metadataAPI.Response).Topics {
+		if t.Name == topic {
+			// This should always hit, unless kafka has a bug.
+			if t.ErrorCode != 0 {
+				return 0, Error(t.ErrorCode)
+			}
+			return len(t.Partitions), nil
+		}
+	}
+	return 0, UnknownTopicOrPartition
+}
+
+func (w *Writer) client(timeout time.Duration) *Client {
+	return &Client{
+		Addr:      w.Addr,
+		Transport: w.Transport,
+		Timeout:   timeout,
+	}
+}
+
+func (w *Writer) balancer() Balancer {
+	if w.Balancer != nil {
+		return w.Balancer
+	}
+	return &w.roundRobin
+}
+
+func (w *Writer) maxAttempts() int {
+	if w.MaxAttempts > 0 {
+		return w.MaxAttempts
+	}
+	// TODO: this is a very high default; if something has failed 9 times it
+	// seems unlikely it will succeed on the 10th attempt. However, it does
+	// carry the risk of greatly increasing the volume of requests sent to the
+	// kafka cluster. We should consider reducing this default (3?).
+	return 10
+}
+
+func (w *Writer) writeBackoffMin() time.Duration {
+	if w.WriteBackoffMin > 0 {
+		return w.WriteBackoffMin
+	}
+	return 100 * time.Millisecond
+}
+
+func (w *Writer) writeBackoffMax() time.Duration {
+	if w.WriteBackoffMax > 0 {
+		return w.WriteBackoffMax
+	}
+	return 1 * time.Second
+}
+
+func (w *Writer) batchSize() int {
+	if w.BatchSize > 0 {
+		return w.BatchSize
+	}
+	return 100
+}
+
+func (w *Writer) batchBytes() int64 {
+	if w.BatchBytes > 0 {
+		return w.BatchBytes
+	}
+	return 1048576
+}
+
+func (w *Writer) batchTimeout() time.Duration {
+	if w.BatchTimeout > 0 {
+		return w.BatchTimeout
+	}
+	return 1 * time.Second
+}
+
+func (w *Writer) readTimeout() time.Duration {
+	if w.ReadTimeout > 0 {
+		return w.ReadTimeout
+	}
+	return 10 * time.Second
+}
+
+func (w *Writer) writeTimeout() time.Duration {
+	if w.WriteTimeout > 0 {
+		return w.WriteTimeout
+	}
+	return 10 * time.Second
+}
+
+func (w *Writer) withLogger(do func(Logger)) {
+	if w.Logger != nil {
+		do(w.Logger)
+	}
+}
+
+func (w *Writer) withErrorLogger(do func(Logger)) {
+	if w.ErrorLogger != nil {
+		do(w.ErrorLogger)
+	} else {
+		w.withLogger(do)
+	}
+}
+
+func (w *Writer) stats() *writerStats {
+	w.once.Do(func() {
+		// This field is not nil when the writer was constructed with NewWriter
+		// to share the value with the dial function and count dials.
+		if w.writerStats == nil {
+			w.writerStats = new(writerStats)
+		}
+	})
+	return w.writerStats
+}
+
+// Stats returns a snapshot of the writer stats since the last time the method
+// was called, or since the writer was created if it is called for the first
+// time.
+//
+// A typical use of this method is to spawn a goroutine that will periodically
+// call Stats on a kafka writer and report the metrics to a stats collection
+// system.
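+//
+// For example (an illustrative sketch, not part of the package):
+//
+//	go func() {
+//		for range time.Tick(10 * time.Second) {
+//			log.Printf("kafka writer stats: %+v", w.Stats())
+//		}
+//	}()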
+func (w *Writer) Stats() WriterStats {
+	stats := w.stats()
+	return WriterStats{
+		Dials:           stats.dials.snapshot(),
+		Writes:          stats.writes.snapshot(),
+		Messages:        stats.messages.snapshot(),
+		Bytes:           stats.bytes.snapshot(),
+		Errors:          stats.errors.snapshot(),
+		DialTime:        stats.dialTime.snapshotDuration(),
+		BatchTime:       stats.batchTime.snapshotDuration(),
+		BatchQueueTime:  stats.batchQueueTime.snapshotDuration(),
+		WriteTime:       stats.writeTime.snapshotDuration(),
+		WaitTime:        stats.waitTime.snapshotDuration(),
+		Retries:         stats.retries.snapshot(),
+		BatchSize:       stats.batchSize.snapshot(),
+		BatchBytes:      stats.batchSizeBytes.snapshot(),
+		MaxAttempts:     int64(w.MaxAttempts),
+		WriteBackoffMin: w.WriteBackoffMin,
+		WriteBackoffMax: w.WriteBackoffMax,
+		MaxBatchSize:    int64(w.BatchSize),
+		BatchTimeout:    w.BatchTimeout,
+		ReadTimeout:     w.ReadTimeout,
+		WriteTimeout:    w.WriteTimeout,
+		RequiredAcks:    int64(w.RequiredAcks),
+		Async:           w.Async,
+		Topic:           w.Topic,
+	}
+}
+
+func (w *Writer) chooseTopic(msg Message) (string, error) {
+	// w.Topic and msg.Topic are mutually exclusive, meaning exactly one of them
+	// must be set, otherwise we will return an error.
+	if w.Topic != "" && msg.Topic != "" {
+		return "", errors.New("kafka.(*Writer): Topic must not be specified for both Writer and Message")
+	} else if w.Topic == "" && msg.Topic == "" {
+		return "", errors.New("kafka.(*Writer): Topic must be specified for Writer or Message")
+	}
+
+	// now we choose the topic, depending on which one is not empty
+	if msg.Topic != "" {
+		return msg.Topic, nil
+	}
+
+	return w.Topic, nil
+}
+
+type batchQueue struct {
+	queue []*writeBatch
+
+	// Pointers are used here to make `go vet` happy, and avoid copying mutexes.
+	// It may be better to revert these to non-pointers and avoid the copies in
+	// a different way.
+	mutex *sync.Mutex
+	cond  *sync.Cond
+
+	closed bool
+}
+
+func (b *batchQueue) Put(batch *writeBatch) bool {
+	b.cond.L.Lock()
+	defer b.cond.L.Unlock()
+	defer b.cond.Broadcast()
+
+	if b.closed {
+		return false
+	}
+	b.queue = append(b.queue, batch)
+	return true
+}
+
+func (b *batchQueue) Get() *writeBatch {
+	b.cond.L.Lock()
+	defer b.cond.L.Unlock()
+
+	for len(b.queue) == 0 && !b.closed {
+		b.cond.Wait()
+	}
+
+	if len(b.queue) == 0 {
+		return nil
+	}
+
+	batch := b.queue[0]
+	b.queue[0] = nil
+	b.queue = b.queue[1:]
+
+	return batch
+}
+
+func (b *batchQueue) Close() {
+	b.cond.L.Lock()
+	defer b.cond.L.Unlock()
+	defer b.cond.Broadcast()
+
+	b.closed = true
+}
+
+func newBatchQueue(initialSize int) batchQueue {
+	bq := batchQueue{
+		queue: make([]*writeBatch, 0, initialSize),
+		mutex: &sync.Mutex{},
+		cond:  &sync.Cond{},
+	}
+
+	bq.cond.L = bq.mutex
+
+	return bq
+}
+
+// partitionWriter is a writer for a topic-partition pair. It maintains messaging order
+// across batches of messages.
+type partitionWriter struct {
+	meta  topicPartition
+	queue batchQueue
+
+	mutex     sync.Mutex
+	currBatch *writeBatch
+
+	// reference to the Writer that owns this partitionWriter. Used for the
+	// produce logic as well as stat tracking
+	w *Writer
+}
+
+func newPartitionWriter(w *Writer, key topicPartition) *partitionWriter {
+	writer := &partitionWriter{
+		meta:  key,
+		queue: newBatchQueue(10),
+		w:     w,
+	}
+	w.spawn(writer.writeBatches)
+	return writer
+}
+
+func (ptw *partitionWriter) writeBatches() {
+	for {
+		batch := ptw.queue.Get()
+
+		// The only time we can return nil is when the queue is closed
+		// and empty. If the queue is closed, that means
+		// the Writer is closed, so once we're here it's time to exit.
+
+func (ptw *partitionWriter) writeBatches() {
+	for {
+		batch := ptw.queue.Get()
+
+		// The only time we can return nil is when the queue is closed
+		// and empty. If the queue is closed, that means the Writer is
+		// closed, so once we're here it's time to exit.
+		if batch == nil {
+			return
+		}
+
+		ptw.writeBatch(batch)
+	}
+}
+
+func (ptw *partitionWriter) writeMessages(msgs []Message, indexes []int32) map[*writeBatch][]int32 {
+	ptw.mutex.Lock()
+	defer ptw.mutex.Unlock()
+
+	batchSize := ptw.w.batchSize()
+	batchBytes := ptw.w.batchBytes()
+
+	var batches map[*writeBatch][]int32
+	if !ptw.w.Async {
+		batches = make(map[*writeBatch][]int32, 1)
+	}
+
+	for _, i := range indexes {
+	assignMessage:
+		batch := ptw.currBatch
+		if batch == nil {
+			batch = ptw.newWriteBatch()
+			ptw.currBatch = batch
+		}
+		if !batch.add(msgs[i], batchSize, batchBytes) {
+			batch.trigger()
+			ptw.queue.Put(batch)
+			ptw.currBatch = nil
+			goto assignMessage
+		}
+
+		if batch.full(batchSize, batchBytes) {
+			batch.trigger()
+			ptw.queue.Put(batch)
+			ptw.currBatch = nil
+		}
+
+		if !ptw.w.Async {
+			batches[batch] = append(batches[batch], i)
+		}
+	}
+	return batches
+}
+
+// ptw.w can be accessed here because this is called with the lock ptw.mutex already held.
+func (ptw *partitionWriter) newWriteBatch() *writeBatch {
+	batch := newWriteBatch(time.Now(), ptw.w.batchTimeout())
+	ptw.w.spawn(func() { ptw.awaitBatch(batch) })
+	return batch
+}
+
+// awaitBatch waits for a batch to either fill up or time out.
+// If the batch is full it only stops the timer; if the timer
+// expires it will queue the batch for writing if needed.
+func (ptw *partitionWriter) awaitBatch(batch *writeBatch) {
+	select {
+	case <-batch.timer.C:
+		ptw.mutex.Lock()
+		// Detach the batch from the writer if we're still attached,
+		// and queue it for writing.
+		// Only the current batch can expire; all previous batches were
+		// already written to the queue.
+		// If writeMessages locks ptw.mutex after the timer fires but before
+		// this goroutine can lock ptw.mutex, it will have filled the batch
+		// and enqueued it, which means ptw.currBatch != batch, so we just
+		// move on.
+		// Otherwise, we detach the batch from the ptWriter and enqueue it
+		// for writing.
+		if ptw.currBatch == batch {
+			ptw.queue.Put(batch)
+			ptw.currBatch = nil
+		}
+		ptw.mutex.Unlock()
+	case <-batch.ready:
+		// The batch became full, it was removed from the ptwriter and its
+		// ready channel was closed. We need to stop the timer to avoid
+		// having it leak until it expires.
+		batch.timer.Stop()
+	}
+	stats := ptw.w.stats()
+	stats.batchQueueTime.observe(int64(time.Since(batch.time)))
+}
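In the retry loop that follows, each failed attempt sleeps for `backoff(attempt, writeBackoffMin, writeBackoffMax)`. The helper itself is defined elsewhere in the package; as an assumption about its general shape rather than a copy of it, an exponential backoff clamped to the configured window looks like this:

```go
package main

import "time"

// exponentialBackoff is a hypothetical stand-in for the package's backoff
// helper: it doubles the minimum delay per additional attempt and clamps
// the result at max. attempt is 1 for the first retry.
func exponentialBackoff(attempt int, min, max time.Duration) time.Duration {
	d := min << uint(attempt-1) // min * 2^(attempt-1)
	if d > max || d < min {     // d < min guards against shift overflow
		return max
	}
	return d
}
```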
+
+func (ptw *partitionWriter) writeBatch(batch *writeBatch) {
+	stats := ptw.w.stats()
+	stats.batchTime.observe(int64(time.Since(batch.time)))
+	stats.batchSize.observe(int64(len(batch.msgs)))
+	stats.batchSizeBytes.observe(batch.bytes)
+
+	var res *ProduceResponse
+	var err error
+	key := ptw.meta
+	for attempt, maxAttempts := 0, ptw.w.maxAttempts(); attempt < maxAttempts; attempt++ {
+		if attempt != 0 {
+			stats.retries.observe(1)
+			// TODO: should there be a way to asynchronously cancel this
+			// operation?
+			//
+			// * If all goroutines that added messages to this batch have
+			//   stopped waiting for it, should we abort?
+			//
+			// * If the writer has been closed? It reduces the durability
+			//   guarantees to abort, but may be better to avoid long wait
+			//   times on close.
+			//
+			delay := backoff(attempt, ptw.w.writeBackoffMin(), ptw.w.writeBackoffMax())
+			ptw.w.withLogger(func(log Logger) {
+				log.Printf("backing off %s writing %d messages to %s (partition: %d)", delay, len(batch.msgs), key.topic, key.partition)
+			})
+			time.Sleep(delay)
+		}
+
+		ptw.w.withLogger(func(log Logger) {
+			log.Printf("writing %d messages to %s (partition: %d)", len(batch.msgs), key.topic, key.partition)
+		})
+
+		start := time.Now()
+		res, err = ptw.w.produce(key, batch)
+
+		stats.writes.observe(1)
+		stats.messages.observe(int64(len(batch.msgs)))
+		stats.bytes.observe(batch.bytes)
+		// stats.writeTime used to report the duration of WriteMessages, but the
+		// implementation was broken and reported values in the nanoseconds
+		// range. In kafka-go 0.4, we recycled this value to instead report the
+		// duration of produce requests, and changed the stats.waitTime value to
+		// report the time that kafka has throttled the requests for.
+		stats.writeTime.observe(int64(time.Since(start)))
+
+		if res != nil {
+			err = res.Error
+			stats.waitTime.observe(int64(res.Throttle))
+		}
+
+		if err == nil {
+			break
+		}
+
+		stats.errors.observe(1)
+
+		ptw.w.withErrorLogger(func(log Logger) {
+			log.Printf("error writing messages to %s (partition %d): %s", key.topic, key.partition, err)
+		})
+
+		if !isTemporary(err) && !isTransientNetworkError(err) {
+			break
+		}
+	}
+
+	if res != nil {
+		for i := range batch.msgs {
+			m := &batch.msgs[i]
+			m.Topic = key.topic
+			m.Partition = int(key.partition)
+			m.Offset = res.BaseOffset + int64(i)
+
+			if m.Time.IsZero() {
+				m.Time = res.LogAppendTime
+			}
+		}
+	}
+
+	if ptw.w.Completion != nil {
+		ptw.w.Completion(batch.msgs, err)
+	}
+
+	batch.complete(err)
+}
+
+func (ptw *partitionWriter) close() {
+	ptw.mutex.Lock()
+	defer ptw.mutex.Unlock()
+
+	if ptw.currBatch != nil {
+		batch := ptw.currBatch
+		ptw.queue.Put(batch)
+		ptw.currBatch = nil
+		batch.trigger()
+	}
+
+	ptw.queue.Close()
+}
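Note how writeBatch above stamps the partition, offset, and timestamp onto each message before handing the batch to the optional Completion callback; for an Async writer this callback is the main way to observe delivery results. A minimal sketch (the broker address, topic, and handler logic are placeholders):

```go
package main

import (
	"log"

	"github.com/segmentio/kafka-go"
)

func newAsyncWriter() *kafka.Writer {
	return &kafka.Writer{
		Addr:  kafka.TCP("localhost:9092"), // placeholder broker
		Topic: "events",                    // placeholder topic
		Async: true, // WriteMessages returns immediately; results arrive below
		Completion: func(messages []kafka.Message, err error) {
			if err != nil {
				log.Printf("failed to deliver %d messages: %v", len(messages), err)
				return
			}
			// On success, writeBatch has already stamped partition and
			// offset onto each message.
			for _, m := range messages {
				log.Printf("delivered to partition %d at offset %d", m.Partition, m.Offset)
			}
		},
	}
}
```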
+
+type writeBatch struct {
+	time  time.Time
+	msgs  []Message
+	size  int
+	bytes int64
+	ready chan struct{}
+	done  chan struct{}
+	timer *time.Timer
+	err   error // result of the batch completion
+}
+
+func newWriteBatch(now time.Time, timeout time.Duration) *writeBatch {
+	return &writeBatch{
+		time:  now,
+		ready: make(chan struct{}),
+		done:  make(chan struct{}),
+		timer: time.NewTimer(timeout),
+	}
+}
+
+func (b *writeBatch) add(msg Message, maxSize int, maxBytes int64) bool {
+	bytes := int64(msg.totalSize())
+
+	if b.size > 0 && (b.bytes+bytes) > maxBytes {
+		return false
+	}
+
+	if cap(b.msgs) == 0 {
+		b.msgs = make([]Message, 0, maxSize)
+	}
+
+	b.msgs = append(b.msgs, msg)
+	b.size++
+	b.bytes += bytes
+	return true
+}
+
+func (b *writeBatch) full(maxSize int, maxBytes int64) bool {
+	return b.size >= maxSize || b.bytes >= maxBytes
+}
+
+func (b *writeBatch) trigger() {
+	close(b.ready)
+}
+
+func (b *writeBatch) complete(err error) {
+	b.err = err
+	close(b.done)
+}
+
+type writerRecords struct {
+	msgs   []Message
+	index  int
+	record Record
+	key    bytesReadCloser
+	value  bytesReadCloser
+}
+
+func (r *writerRecords) ReadRecord() (*Record, error) {
+	if r.index >= 0 && r.index < len(r.msgs) {
+		m := &r.msgs[r.index]
+		r.index++
+		r.record = Record{
+			Time:    m.Time,
+			Headers: m.Headers,
+		}
+		if m.Key != nil {
+			r.key.Reset(m.Key)
+			r.record.Key = &r.key
+		}
+		if m.Value != nil {
+			r.value.Reset(m.Value)
+			r.record.Value = &r.value
+		}
+		return &r.record, nil
+	}
+	return nil, io.EOF
+}
+
+type bytesReadCloser struct{ bytes.Reader }
+
+func (*bytesReadCloser) Close() error { return nil }
+
+// A cache of []int values passed to balancers of writers, used to amortize the
+// heap allocation of the partition index lists.
+//
+// With hindsight, the use of `...int` to pass the partition list to Balancers
+// was not the best design choice: kafka partition numbers are monotonically
+// increasing, so we could have simply passed the number of partitions instead.
+// If we ever revisit this API, we can hopefully remove this cache.
+var partitionsCache atomic.Value
+
+func loadCachedPartitions(numPartitions int) []int {
+	partitions, ok := partitionsCache.Load().([]int)
+	if ok && len(partitions) >= numPartitions {
+		return partitions[:numPartitions]
+	}
+
+	const alignment = 128
+	n := ((numPartitions / alignment) + 1) * alignment
+
+	partitions = make([]int, n)
+	for i := range partitions {
+		partitions[i] = i
+	}
+
+	partitionsCache.Store(partitions)
+	return partitions[:numPartitions]
+}
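loadCachedPartitions amortizes the `[]int` slices that Balancers receive: Balance gets every writable partition as variadic ints and must return one of them. A toy custom Balancer consuming that list might look like the following; the routing rule is an arbitrary assumption for illustration:

```go
package main

import "github.com/segmentio/kafka-go"

// lenBalancer routes by key length. Balance receives the cached partition
// list ([0, 1, ..., n-1]) and must return one of its elements.
type lenBalancer struct{}

func (lenBalancer) Balance(msg kafka.Message, partitions ...int) int {
	if len(partitions) == 0 {
		return 0
	}
	return partitions[len(msg.Key)%len(partitions)]
}
```

When no Balancer is set, the writer falls back to its built-in round-robin balancer, per the balancer() helper earlier in the file.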
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 15855baaef8..aea31297cba 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -910,6 +910,8 @@ github.com/klauspost/compress/gzip
 github.com/klauspost/compress/huff0
 github.com/klauspost/compress/internal/cpuinfo
 github.com/klauspost/compress/internal/snapref
+github.com/klauspost/compress/s2
+github.com/klauspost/compress/snappy
 github.com/klauspost/compress/zlib
 github.com/klauspost/compress/zstd
 github.com/klauspost/compress/zstd/internal/xxhash
@@ -1141,6 +1143,56 @@ github.com/ryanuber/go-glob
 # github.com/samber/lo v1.37.0
 ## explicit; go 1.18
 github.com/samber/lo
+# github.com/segmentio/kafka-go v0.4.42
+## explicit; go 1.15
+github.com/segmentio/kafka-go
+github.com/segmentio/kafka-go/compress
+github.com/segmentio/kafka-go/compress/gzip
+github.com/segmentio/kafka-go/compress/lz4
+github.com/segmentio/kafka-go/compress/snappy
+github.com/segmentio/kafka-go/compress/zstd
+github.com/segmentio/kafka-go/protocol
+github.com/segmentio/kafka-go/protocol/addoffsetstotxn
+github.com/segmentio/kafka-go/protocol/addpartitionstotxn
+github.com/segmentio/kafka-go/protocol/alterclientquotas
+github.com/segmentio/kafka-go/protocol/alterconfigs
+github.com/segmentio/kafka-go/protocol/alterpartitionreassignments
+github.com/segmentio/kafka-go/protocol/apiversions
+github.com/segmentio/kafka-go/protocol/consumer
+github.com/segmentio/kafka-go/protocol/createacls
+github.com/segmentio/kafka-go/protocol/createpartitions
+github.com/segmentio/kafka-go/protocol/createtopics
+github.com/segmentio/kafka-go/protocol/deletegroups
+github.com/segmentio/kafka-go/protocol/deletetopics
+github.com/segmentio/kafka-go/protocol/describeclientquotas
+github.com/segmentio/kafka-go/protocol/describeconfigs
+github.com/segmentio/kafka-go/protocol/describegroups
+github.com/segmentio/kafka-go/protocol/electleaders
+github.com/segmentio/kafka-go/protocol/endtxn
+github.com/segmentio/kafka-go/protocol/fetch
+github.com/segmentio/kafka-go/protocol/findcoordinator
+github.com/segmentio/kafka-go/protocol/heartbeat
+github.com/segmentio/kafka-go/protocol/incrementalalterconfigs
+github.com/segmentio/kafka-go/protocol/initproducerid
+github.com/segmentio/kafka-go/protocol/joingroup
+github.com/segmentio/kafka-go/protocol/leavegroup
+github.com/segmentio/kafka-go/protocol/listgroups
+github.com/segmentio/kafka-go/protocol/listoffsets
+github.com/segmentio/kafka-go/protocol/metadata
+github.com/segmentio/kafka-go/protocol/offsetcommit
+github.com/segmentio/kafka-go/protocol/offsetdelete
+github.com/segmentio/kafka-go/protocol/offsetfetch
+github.com/segmentio/kafka-go/protocol/produce
+github.com/segmentio/kafka-go/protocol/saslauthenticate
+github.com/segmentio/kafka-go/protocol/saslhandshake
+github.com/segmentio/kafka-go/protocol/syncgroup
+github.com/segmentio/kafka-go/protocol/txnoffsetcommit
+github.com/segmentio/kafka-go/sasl
+github.com/segmentio/kafka-go/sasl/plain
+github.com/segmentio/kafka-go/sasl/scram
+# github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2 v0.1.0
+## explicit; go 1.15
+github.com/segmentio/kafka-go/sasl/aws_msk_iam_v2
 # github.com/shopspring/decimal v1.3.1
 ## explicit; go 1.13
 github.com/shopspring/decimal
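The vendored package list above pulls in kafka-go's sasl/plain and sasl/scram mechanisms alongside the separate aws_msk_iam_v2 module. As a minimal sketch of wiring one of them into a writer through its Transport (the broker address, topic, and credentials are placeholders):

```go
package main

import (
	"crypto/tls"

	"github.com/segmentio/kafka-go"
	"github.com/segmentio/kafka-go/sasl/plain"
)

func newAuthenticatedWriter() *kafka.Writer {
	return &kafka.Writer{
		Addr:  kafka.TCP("broker.example.com:9093"), // placeholder address
		Topic: "events",                             // placeholder topic
		Transport: &kafka.Transport{
			TLS: &tls.Config{}, // SASL PLAIN should only run over TLS
			SASL: plain.Mechanism{
				Username: "user", // placeholder credentials
				Password: "pass",
			},
		},
	}
}
```

The scram and aws_msk_iam_v2 packages expose mechanisms that slot into the same `SASL` field.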